[
  {
    "path": ".github/workflows/ci.yml",
    "content": "name: Seafile CI\n\non: [push, pull_request]\n\njobs:\n  build:\n    runs-on: ubuntu-latest\n\n    steps:\n      - uses: actions/checkout@v1\n        with:\n          fetch-depth: 1\n      - uses: actions/setup-python@v3\n        with:\n          python-version: \"3.12\"\n      - name: install dependencies and test\n        run: |\n          cd $GITHUB_WORKSPACE\n          ./ci/install-deps.sh\n          ./ci/run.py\n"
  },
  {
    "path": ".github/workflows/golangci-lint.yml",
    "content": "name: golangci-lint\n\non: [push, pull_request]\n\npermissions:\n  contents: read\n  # Optional: allow read access to pull request. Use with `only-new-issues` option.\n  # pull-requests: read\n\njobs:\n  golangci-fileserver:\n    name: lint\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v4\n      - uses: actions/setup-go@v5\n        with:\n          go-version: \"1.22\"\n      - name: golangci-lint\n        uses: golangci/golangci-lint-action@v6\n        with:\n          version: v1.59\n          working-directory: ./fileserver\n          args: --timeout=5m\n  golangci-notification-server:\n    name: lint\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v4\n      - uses: actions/setup-go@v5\n        with:\n          go-version: \"1.22\"\n      - name: golangci-lint\n        uses: golangci/golangci-lint-action@v6\n        with:\n          version: v1.59\n          working-directory: ./notification-server\n          args: --timeout=5m\n"
  },
  {
    "path": ".gitignore",
    "content": "*~\n*.bak\n*.o\n*.exe\ncscope*\n*#\nMakefile.in\nltmain.sh\nlibtool\n*.lo\n*.la\ninstall-sh\ndepcomp\nconfig.guess\nconfig.h\nconfig.log\nconfig.status\nconfig.sub\nconfig.cache\nconfigure\n*/.deps\nautom4te*\npo/POTFILES\npo/Makefile*\npo/stamp-it\npo/*.gmo\npo/*.pot\nmissing\nmkinstalldirs\nstamp-h1\n*.libs/\nMakefile\naclocal.m4\n*core\nm4/intltool.m4\nm4/libtool.m4\nm4/ltoptions.m4\nm4/ltsugar.m4\nm4/ltversion.m4\nm4/lt~obsolete.m4\nccnet-*.tar.gz\nconfig.h.in\npy-compile\nintltool-extract.in\nintltool-merge.in\nintltool-update.in\n*.stamp\n*.pyc\n*.tmp.ui\n*.defs\n*.log\n.deps\n*.db\n*.dll\n*.aps\n*.so\nbuild-stamp\ndebian/files\ndebian/seafile\ndebian/*.substvars\nlib/searpc-marshal.h\nlib/searpc-signature.h\nlib/*.tmp\nlib/dir.c\nlib/dirent.c\nlib/seafile-object.h\nlib/task.c\nlib/webaccess.c\nlib/branch.c\nlib/commit.c\nlib/crypt.c\nlib/repo.c\nlib/copy-task.c\nlib/search-result.c\nseaf-server\nseafserv-gc\nseaf-migrate\nseaf-fsck\nseaf-fuse\ncontroller/seafile-controller\ntools/seaf-server-init\ntests/conf/misc/\ntests/conf/seafile-data/\ntests/conf/ccnet.db\ntests/conf/ccnet.sock\ntests/conf/GroupMgr\ntests/conf/OrgMgr\ntests/conf/PeerMgr\n*.dylib\n.DS_Store\n*.pc\n*.tar.gz\n/compile\n/test-driver\n*.dmp\n/symbols\n__pycache__/\n.cache/\n"
  },
  {
    "path": "LICENSE.txt",
    "content": "This program is released under Affero GPLv3, with the following additional\npermission to link with OpenSSL library.\n\nIf you modify this program, or any covered work, by linking or\ncombining it with the OpenSSL project's OpenSSL library (or a\nmodified version of that library), containing parts covered by the\nterms of the OpenSSL or SSLeay licenses, Seafile Ltd.\ngrants you additional permission to convey the resulting work.\nCorresponding Source for a non-source form of such a combination\nshall include the source code for the parts of OpenSSL used as well\nas that of the covered work.\n\nThe source code files under 'python' directory is released under\nApache License v2.0. You can find Apache License 2.0 file in that\ndirectory.\n\n              GNU AFFERO GENERAL PUBLIC LICENSE\n\n                 Version 3, 19 November 2007\n\nCopyright © 2007 Free Software Foundation, Inc. <http://fsf.org/> \nEveryone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.\n\nPreamble\n\nThe GNU Affero General Public License is a free, copyleft license for software and other kinds of works, specifically designed to ensure cooperation with the community in the case of network server software.\n\nThe licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, our General Public Licenses are intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users.\n\nWhen we speak of free software, we are referring to freedom, not price. 
Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things.\n\nDevelopers that use our General Public Licenses protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License which gives you legal permission to copy, distribute and/or modify the software.\n\nA secondary benefit of defending all users' freedom is that improvements made in alternate versions of the program, if they receive widespread use, become available for other developers to incorporate. Many developers of free software are heartened and encouraged by the resulting cooperation. However, in the case of software used on network servers, this result may fail to come about. The GNU General Public License permits making a modified version and letting the public access it on a server without ever releasing its source code to the public.\n\nThe GNU Affero General Public License is designed specifically to ensure that, in such cases, the modified source code becomes available to the community. It requires the operator of a network server to provide the source code of the modified version running there to the users of that server. Therefore, public use of a modified version, on a publicly accessible server, gives the public access to the source code of the modified version.\n\nAn older license, called the Affero General Public License and published by Affero, was designed to accomplish similar goals. This is a different license, not a version of the Affero GPL, but Affero has released a new version of the Affero GPL which permits relicensing under this license.\n\nThe precise terms and conditions for copying, distribution and modification follow.\n\nTERMS AND CONDITIONS\n\n0. 
Definitions.\n\n\"This License\" refers to version 3 of the GNU Affero General Public License.\n\n\"Copyright\" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks.\n\n\"The Program\" refers to any copyrightable work licensed under this License. Each licensee is addressed as \"you\". \"Licensees\" and \"recipients\" may be individuals or organizations.\n\nTo \"modify\" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a \"modified version\" of the earlier work or a work \"based on\" the earlier work.\n\nA \"covered work\" means either the unmodified Program or a work based on the Program.\n\nTo \"propagate\" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well.\n\nTo \"convey\" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying.\n\nAn interactive user interface displays \"Appropriate Legal Notices\" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion.\n\n1. 
Source Code.\n\nThe \"source code\" for a work means the preferred form of the work for making modifications to it. \"Object code\" means any non-source form of a work.\n\nA \"Standard Interface\" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language.\n\nThe \"System Libraries\" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A \"Major Component\", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it.\n\nThe \"Corresponding Source\" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. 
For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work.\n\nThe Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source.\n\nThe Corresponding Source for a work in source code form is that same work.\n\n2. Basic Permissions.\n\nAll rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law.\n\nYou may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you.\n\nConveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary.\n\n3. 
Protecting Users' Legal Rights From Anti-Circumvention Law.\n\nNo covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures.\n\nWhen you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures.\n\n4. Conveying Verbatim Copies.\n\nYou may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program.\n\nYou may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee.\n\n5. Conveying Modified Source Versions.\n\nYou may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions:\n\na) The work must carry prominent notices stating that you modified it, and giving a relevant date.\nb) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. 
This requirement modifies the requirement in section 4 to \"keep intact all notices\".\nc) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it.\nd) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so.\nA compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an \"aggregate\" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate.\n\n6. 
Conveying Non-Source Forms.\n\nYou may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways:\n\na) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange.\nb) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge.\nc) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b.\nd) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. 
If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements.\ne) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d.\nA separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work.\n\nA \"User Product\" is either (1) a \"consumer product\", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, \"normally used\" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product.\n\n\"Installation Information\" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. 
The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made.\n\nIf you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM).\n\nThe requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network.\n\nCorresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying.\n\n7. Additional Terms.\n\n\"Additional permissions\" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. 
If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions.\n\nWhen you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission.\n\nNotwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms:\n\na) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or\nb) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or\nc) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or\nd) Limiting the use for publicity purposes of names of licensors or authors of the material; or\ne) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or\nf) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors.\nAll other non-permissive additional terms are considered \"further restrictions\" within the meaning of section 10. 
If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying.\n\nIf you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms.\n\nAdditional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way.\n\n8. Termination.\n\nYou may not propagate or modify a covered work except as expressly provided under this License. 
Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11).\n\nHowever, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation.\n\nMoreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice.\n\nTermination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10.\n\n9. Acceptance Not Required for Having Copies.\n\nYou are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so.\n\n10. 
Automatic Licensing of Downstream Recipients.\n\nEach time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License.\n\nAn \"entity transaction\" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts.\n\nYou may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it.\n\n11. Patents.\n\nA \"contributor\" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. 
The work thus licensed is called the contributor's \"contributor version\".\n\nA contributor's \"essential patent claims\" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, \"control\" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License.\n\nEach contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version.\n\nIn the following three paragraphs, a \"patent license\" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To \"grant\" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party.\n\nIf you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. 
\"Knowingly relying\" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid.\n\nIf, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it.\n\nA patent license is \"discriminatory\" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007.\n\nNothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law.\n\n12. 
No Surrender of Others' Freedom.\n\nIf conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program.\n\n13. Remote Network Interaction; Use with the GNU General Public License.\n\nNotwithstanding any other provision of this License, if you modify the Program, your modified version must prominently offer all users interacting with it remotely through a computer network (if your version supports such interaction) an opportunity to receive the Corresponding Source of your version by providing access to the Corresponding Source from a network server at no charge, through some standard or customary means of facilitating copying of software. This Corresponding Source shall include the Corresponding Source for any work covered by version 3 of the GNU General Public License that is incorporated pursuant to the following paragraph.\n\nNotwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the work with which it is combined will remain governed by version 3 of the GNU General Public License.\n\n14. 
Revised Versions of this License.\n\nThe Free Software Foundation may publish revised and/or new versions of the GNU Affero General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.\n\nEach version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU Affero General Public License \"or any later version\" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU Affero General Public License, you may choose any version ever published by the Free Software Foundation.\n\nIf the Program specifies that a proxy can decide which future versions of the GNU Affero General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program.\n\nLater license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version.\n\n15. Disclaimer of Warranty.\n\nTHERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.\n\n16. 
Limitation of Liability.\n\nIN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.\n\n17. Interpretation of Sections 15 and 16.\n\nIf the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee.\n\nEND OF TERMS AND CONDITIONS\n\nHow to Apply These Terms to Your New Programs\n\nIf you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms.\n\nTo do so, attach the following notices to the program. 
It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the \"copyright\" line and a pointer to where the full notice is found.\n\n    <one line to give the program's name and a brief idea of what it does.>\n    Copyright (C) <year>  <name of author>\n\n    This program is free software: you can redistribute it and/or modify\n    it under the terms of the GNU Affero General Public License as\n    published by the Free Software Foundation, either version 3 of the\n    License, or (at your option) any later version.\n\n    This program is distributed in the hope that it will be useful,\n    but WITHOUT ANY WARRANTY; without even the implied warranty of\n    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n    GNU Affero General Public License for more details.\n\n    You should have received a copy of the GNU Affero General Public License\n    along with this program.  If not, see <http://www.gnu.org/licenses/>.\nAlso add information on how to contact you by electronic and paper mail.\n\nIf your software can interact with users remotely through a computer network, you should also make sure that it provides a way for users to get its source. For example, if your program is a web application, its interface could display a \"Source\" link that leads users to an archive of the code. There are many ways you could offer source, and different solutions will be better for different programs; see section 13 for the specific requirements.\n\nYou should also get your employer (if you work as a programmer) or school, if any, to sign a \"copyright disclaimer\" for the program, if necessary. For more information on this, and how to apply and follow the GNU AGPL, see <http://www.gnu.org/licenses/>.\n"
  },
  {
    "path": "Makefile.am",
    "content": "MAKE_CLIENT =\n\nif WIN32\n  MAKE_CONTROLLER =\nelse\n  MAKE_CONTROLLER = controller\nendif\n\nif COMPILE_FUSE\n  MAKE_FUSE = fuse\nelse\n  MAKE_FUSE =\nendif\n\nMAKE_SERVER = server tools $(MAKE_CONTROLLER) $(MAKE_FUSE)\n\nSUBDIRS = include lib common python $(MAKE_SERVER) doc scripts\n\nDIST_SUBDIRS = include lib common python server tools controller fuse doc scripts\n\nINTLTOOL = \\\n\tintltool-extract.in \\\n\tintltool-merge.in \\\n\tintltool-update.in\n\nEXTRA_DIST = install-sh $(INTLTOOL) README.markdown scripts LICENSE.txt\n\nACLOCAL_AMFLAGS = -I m4\n\ndist-hook:\n\tgit log --format='%H' -1 > $(distdir)/latest_commit\n"
  },
  {
    "path": "README.markdown",
    "content": "Seafile Server Core [![Build Status](https://secure.travis-ci.org/haiwen/seafile-server.svg?branch=master)](http://travis-ci.org/haiwen/seafile-server)\n============\n\nSeafile is an open source cloud storage system with features on privacy protection and teamwork. Collections of files are called libraries, and each library can be synced separately. A library can also be encrypted with a user chosen password. Seafile also allows users to create groups and easily sharing files into groups.\n\nThis is the core component of Seafile server. It provides RPC to the web front-end (Seahub) to access files, and provides HTTP APIs to the desktop clients for syncing files.\n\nBuild and Run\n=============\n\nSee <https://manual.seafile.com/build_seafile/server/>\n\nContributing\n===========\n\nFor more informations read [Contribution](https://manual.seafile.com/contribution/).\n\nLicense\n=======\n\nThe Seafile server core is published under AGPLv3. Other components of Seafile have different licenses. Please refer to the coresponding projects.\n\nContact\n=======\n\nTwitter: @seafile <https://twitter.com/seafile>\n\nForum: <https://forum.seafile.com>\n"
  },
  {
    "path": "README.testing.md",
    "content": "# Seafile Server Tests\n\n## Run it locally\n\nTo run the tests, you need to install pytest first:\n\n```sh\npip install -r ci/requirements.txt\n```\n\nCompile and install ccnet-server and seafile-server\n```\ncd ccnet-server\nmake\nsudo make install\n\ncd seafile-server\nmake\nsudo make install\n```\n\nThen run the tests with\n```sh\ncd seafile-server\n./run_tests.sh\n```\n\nBy default the test script would try to start ccnet-server and seaf-server in `/usr/local/bin`, if you `make install` to another location, say `/opt/local`, run it like this:\n```sh\nSEAFILE_INSTALL_PREFIX=/opt/local ./run_tests.sh\n```\n"
  },
  {
    "path": "autogen.sh",
    "content": "#!/bin/bash\n# Run this to generate all the initial makefiles, etc.\n\n: ${AUTOCONF=autoconf}\n: ${AUTOHEADER=autoheader}\n: ${AUTOMAKE=automake}\n: ${ACLOCAL=aclocal}\nif test \"$(uname)\" != \"Darwin\"; then\n    : ${LIBTOOLIZE=libtoolize}\nelse\n    : ${LIBTOOLIZE=glibtoolize}\nfi\n: ${INTLTOOLIZE=intltoolize}\n: ${LIBTOOL=libtool}\n\nsrcdir=`dirname $0`\ntest -z \"$srcdir\" && srcdir=.\n\nORIGDIR=`pwd`\ncd $srcdir\nPROJECT=ccnet\nTEST_TYPE=-f\nFILE=net/main.c\nCONFIGURE=configure.ac\n\nDIE=0\n\n($AUTOCONF --version) < /dev/null > /dev/null 2>&1 || {\n\techo\n\techo \"You must have autoconf installed to compile $PROJECT.\"\n\techo \"Download the appropriate package for your distribution,\"\n\techo \"or get the source tarball at ftp://ftp.gnu.org/pub/gnu/\"\n\tDIE=1\n}\n\n(grep \"^AC_PROG_INTLTOOL\" $srcdir/$CONFIGURE >/dev/null) && {\n  ($INTLTOOLIZE --version) < /dev/null > /dev/null 2>&1 || {\n    echo\n    echo \"You must have \\`intltoolize' installed to compile $PROJECT.\"\n    echo \"Get ftp://ftp.gnome.org/pub/GNOME/stable/sources/intltool/intltool-0.22.tar.gz\"\n    echo \"(or a newer version if it is available)\"\n    DIE=1\n  }\n}\n\n($AUTOMAKE --version) < /dev/null > /dev/null 2>&1 || {\n\techo\n\techo \"You must have automake installed to compile $PROJECT.\"\n\techo \"Get ftp://sourceware.cygnus.com/pub/automake/automake-1.7.tar.gz\"\n\techo \"(or a newer version if it is available)\"\n\tDIE=1\n}\n\nif test \"$(uname)\" != \"Darwin\"; then\n(grep \"^AC_PROG_LIBTOOL\" $CONFIGURE >/dev/null) && {\n  ($LIBTOOL --version) < /dev/null > /dev/null 2>&1 || {\n    echo\n    echo \"**Error**: You must have \\`libtool' installed to compile $PROJECT.\"\n    echo \"Get ftp://ftp.gnu.org/pub/gnu/libtool-1.4.tar.gz\"\n    echo \"(or a newer version if it is available)\"\n    DIE=1\n  }\n}\nfi\n\n\nif grep \"^AM_[A-Z0-9_]\\{1,\\}_GETTEXT\" \"$CONFIGURE\" >/dev/null; then\n  if grep \"sed.*POTFILES\" \"$CONFIGURE\" >/dev/null; then\n    
GETTEXTIZE=\"\"\n  else\n    if grep \"^AM_GLIB_GNU_GETTEXT\" \"$CONFIGURE\" >/dev/null; then\n      GETTEXTIZE=\"glib-gettextize\"\n      GETTEXTIZE_URL=\"ftp://ftp.gtk.org/pub/gtk/v2.0/glib-2.0.0.tar.gz\"\n    else\n      GETTEXTIZE=\"gettextize\"\n      GETTEXTIZE_URL=\"ftp://alpha.gnu.org/gnu/gettext-0.10.35.tar.gz\"\n    fi\n\n    $GETTEXTIZE --version < /dev/null > /dev/null 2>&1\n    if test $? -ne 0; then\n      echo\n      echo \"**Error**: You must have \\`$GETTEXTIZE' installed to compile $PKG_NAME.\"\n      echo \"Get $GETTEXTIZE_URL\"\n      echo \"(or a newer version if it is available)\"\n      DIE=1\n    fi\n  fi\nfi\n\nif test \"$DIE\" -eq 1; then\n\texit 1\nfi\n\ndr=`dirname .`\necho processing $dr\naclocalinclude=\"$aclocalinclude -I m4\"\n\nif test x\"$MSYSTEM\" = x\"MINGW32\"; then\n    aclocalinclude=\"$aclocalinclude -I /mingw32/share/aclocal\"\nelif test \"$(uname)\" = \"Darwin\"; then\n    aclocalinclude=\"$aclocalinclude -I /opt/local/share/aclocal\"\nfi\n\n\necho \"Creating $dr/aclocal.m4 ...\"\ntest -r $dr/aclocal.m4 || touch $dr/aclocal.m4\necho \"Running glib-gettextize...  Ignore non-fatal messages.\"\necho \"no\" | glib-gettextize --force --copy\n\necho \"Making $dr/aclocal.m4 writable ...\"\ntest -r $dr/aclocal.m4 && chmod u+w $dr/aclocal.m4\n\necho \"Running intltoolize...\"\nintltoolize --copy --force --automake\n\necho \"Running $LIBTOOLIZE...\"\n$LIBTOOLIZE --force --copy\n\necho \"Running $ACLOCAL $aclocalinclude ...\"\n$ACLOCAL $aclocalinclude\n\necho \"Running $AUTOHEADER...\"\n$AUTOHEADER\n\necho \"Running $AUTOMAKE --gnu $am_opt ...\"\n$AUTOMAKE --add-missing --gnu $am_opt\n\necho \"Running $AUTOCONF ...\"\n$AUTOCONF\n"
  },
  {
    "path": "ci/install-deps.sh",
    "content": "#!/bin/bash\n\nset -e -x\n\nSCRIPT=${BASH_SOURCE[0]}\nTESTS_DIR=$(dirname \"${SCRIPT}\")/..\nSETUP_DIR=${TESTS_DIR}/ci\n\ncd $SETUP_DIR\n\nsudo systemctl start mysql.service\nsudo apt-get update --fix-missing\nsudo apt-get install -y intltool libarchive-dev libcurl4-openssl-dev libevent-dev \\\nlibfuse-dev libglib2.0-dev libjansson-dev libmysqlclient-dev libonig-dev \\\nsqlite3 libsqlite3-dev libtool net-tools uuid-dev valac libargon2-dev\nsudo systemctl start mysql.service\n\npip install -r requirements.txt\n"
  },
  {
    "path": "ci/requirements.txt",
    "content": "termcolor>=1.1.0\nrequests>=2.8.0\npytest>=3.3.2\nbackports.functools_lru_cache>=1.4\ntenacity>=4.8.0\nfuture\nrequests-toolbelt\n"
  },
  {
    "path": "ci/run.py",
    "content": "#!/usr/bin/env python\n\"\"\"\nInstall dir: ~/opt/local\nData dir: /tmp/haiwen\n\"\"\"\n\nimport argparse\nimport glob\nimport json\nimport logging\nimport os\nimport re\nimport sys\nfrom os.path import abspath, basename, exists, expanduser, join\n\nimport requests\nimport termcolor\nimport site\n\nfrom serverctl import ServerCtl\nfrom utils import (\n    cd, chdir, debug, green, info, lru_cache, mkdirs, on_github_actions, red,\n    setup_logging, shell, warning\n)\n\nlogger = logging.getLogger(__name__)\n\nTOPDIR = abspath(join(os.getcwd(), '..'))\nif on_github_actions():\n    PREFIX = expanduser('~/opt/local')\nelse:\n    PREFIX = os.environ.get('SEAFILE_INSTALL_PREFIX', '/usr/local')\nINSTALLDIR = '/tmp/seafile-tests'\n\n\ndef num_jobs():\n    return int(os.environ.get('NUM_JOBS', 2))\n\n\n@lru_cache()\ndef make_build_env():\n    env = dict(os.environ)\n    libsearpc_dir = abspath(join(TOPDIR, 'libsearpc'))\n    ccnet_dir = abspath(join(TOPDIR, 'ccnet-server'))\n\n    def _env_add(*a, **kw):\n        kw['env'] = env\n        return prepend_env_value(*a, **kw)\n\n    _env_add('CPPFLAGS', '-I%s' % join(PREFIX, 'include'), seperator=' ')\n\n    _env_add('LDFLAGS', '-L%s' % join(PREFIX, 'lib'), seperator=' ')\n\n    _env_add('LDFLAGS', '-L%s' % join(PREFIX, 'lib64'), seperator=' ')\n\n    _env_add('PATH', join(PREFIX, 'bin'))\n\n    py_version = '.'.join(map(str, sys.version_info[:3]))\n    if on_github_actions():\n        _env_add('PYTHONPATH', join(os.environ.get('RUNNER_TOOL_CACHE'), 'Python/{py_version}/x64/lib/python3.12/site-packages'))\n    _env_add('PYTHONPATH', join(PREFIX, 'lib/python3.12/site-packages'))\n    _env_add('PKG_CONFIG_PATH', join(PREFIX, 'lib', 'pkgconfig'))\n    _env_add('PKG_CONFIG_PATH', join(PREFIX, 'lib64', 'pkgconfig'))\n    _env_add('PKG_CONFIG_PATH', libsearpc_dir)\n    _env_add('PKG_CONFIG_PATH', ccnet_dir)\n    _env_add('LD_LIBRARY_PATH', join(PREFIX, 'lib'))\n\n    _env_add('JWT_PRIVATE_KEY', 
'@%ukmcl$k=9u-grs4azdljk(sn0kd!=mzc17xd7x8#!u$1x@kl')\n\n    _env_add('SEAFILE_MYSQL_DB_CCNET_DB_NAME', 'ccnet')\n\n    # Prepend the seafile-server/python to PYTHONPATH so we don't need to \"make\n    # install\" each time after editing python files.\n    _env_add('PYTHONPATH', join(SeafileServer().projectdir, 'python'))\n\n    for key in ('PATH', 'PKG_CONFIG_PATH', 'CPPFLAGS', 'LDFLAGS', 'PYTHONPATH'):\n        info('%s: %s', key, env.get(key, ''))\n    return env\n\n\ndef prepend_env_value(name, value, seperator=':', env=None):\n    '''append a new value to a list'''\n    env = env or os.environ\n    current_value = env.get(name, '')\n    new_value = value\n    if current_value:\n        new_value += seperator + current_value\n\n    env[name] = new_value\n    return env\n\n\n@lru_cache()\ndef get_branch_json_file():\n    url = 'https://raw.githubusercontent.com/haiwen/seafile-test-deploy/master/branches.json'\n    return requests.get(url).json()\n\n\ndef get_project_branch(project, default_branch='master'):\n    travis_branch = os.environ.get('TRAVIS_BRANCH', 'master')\n    if project.name == 'seafile-server':\n        return travis_branch\n    conf = get_branch_json_file()\n    return conf.get(travis_branch, {}).get(project.name, default_branch)\n\n\nclass Project(object):\n    def __init__(self, name):\n        self.name = name\n        self.version = ''\n\n    @property\n    def url(self):\n        return 'https://www.github.com/haiwen/{}.git'.format(self.name)\n\n    @property\n    def projectdir(self):\n        return join(TOPDIR, self.name)\n\n    def branch(self):\n        return get_project_branch(self)\n\n    def clone(self):\n        if exists(self.name):\n            with cd(self.name):\n                shell('git fetch origin --tags')\n        else:\n            shell(\n                'git clone --depth=1 --branch {} {}'.\n                format(self.branch(), self.url)\n            )\n\n    @chdir\n    def compile_and_install(self):\n        cmds = 
[\n            './autogen.sh',\n            './configure --prefix={}'.format(PREFIX),\n            'make -j{} V=0'.format(num_jobs()),\n            'make install',\n        ]\n        for cmd in cmds:\n            shell(cmd)\n\n    @chdir\n    def use_branch(self, branch):\n        shell('git checkout {}'.format(branch))\n\n\nclass Libsearpc(Project):\n    def __init__(self):\n        super(Libsearpc, self).__init__('libsearpc')\n\n    def branch(self):\n        return 'master'\n\n\nclass CcnetServer(Project):\n    def __init__(self):\n        super(CcnetServer, self).__init__('ccnet-server')\n\n    def branch(self):\n        return '7.1'\n\n\nclass SeafileServer(Project):\n    def __init__(self):\n        super(SeafileServer, self).__init__('seafile-server')\n\nclass Libevhtp(Project):\n    def __init__(self):\n        super(Libevhtp, self).__init__('libevhtp')\n\n    def branch(self):\n        return 'master'\n\n    @chdir\n    def compile_and_install(self):\n        cmds = [\n            'cmake -DEVHTP_DISABLE_SSL=ON -DEVHTP_BUILD_SHARED=OFF -DCMAKE_POLICY_VERSION_MINIMUM=3.5 .',\n            'make',\n            'sudo make install',\n            'sudo ldconfig',\n        ]\n\n        for cmd in cmds:\n            shell(cmd)\n\nclass Libjwt(Project):\n    def __init__(self):\n        super(Libjwt, self).__init__('libjwt')\n\n    def branch(self):\n        return 'v1.13.1'\n\n    @property\n    def url(self):\n        return 'https://www.github.com/benmcollins/libjwt.git'\n\n    @chdir\n    def compile_and_install(self):\n        cmds = [\n            'autoreconf -i',\n            './configure',\n            'sudo make all',\n            'sudo make install',\n        ]\n\n        for cmd in cmds:\n            shell(cmd)\n\nclass Libhiredis(Project):\n    def __init__(self):\n        super(Libhiredis, self).__init__('hiredis')\n\n    def branch(self):\n        return 'v1.1.0'\n\n    @property\n    def url(self):\n        return 
'https://github.com/redis/hiredis.git'\n\n    @chdir\n    def compile_and_install(self):\n        cmds = [\n            'sudo make',\n            'sudo make install',\n        ]\n\n        for cmd in cmds:\n            shell(cmd)\n\ndef fetch_and_build():\n    libsearpc = Libsearpc()\n    libjwt = Libjwt()\n    libhiredis = Libhiredis()\n    libevhtp = Libevhtp()\n    ccnet = CcnetServer()\n    seafile = SeafileServer()\n\n    libsearpc.clone()\n    libjwt.clone()\n    libhiredis.clone()\n    libevhtp.clone()\n    ccnet.clone()\n\n    libsearpc.compile_and_install()\n    libjwt.compile_and_install()\n    libhiredis.compile_and_install()\n    libevhtp.compile_and_install()\n    seafile.compile_and_install()\n\n\ndef parse_args():\n    ap = argparse.ArgumentParser()\n    ap.add_argument('-v', '--verbose', action='store_true')\n    ap.add_argument('-t', '--test-only', action='store_true')\n\n    return ap.parse_args()\n\n\ndef main():\n    mkdirs(INSTALLDIR)\n    os.environ.update(make_build_env())\n    args = parse_args()\n    if on_github_actions() and not args.test_only:\n        fetch_and_build()\n    dbs = ('mysql',)\n    for db in dbs:\n        start_and_test_with_db(db)\n\n\ndef start_and_test_with_db(db):\n    if db == 'sqlite3':\n        fileservers = ('c_fileserver',)\n    else:\n        fileservers = ('go_fileserver', 'c_fileserver')\n    for fileserver in fileservers:\n        shell('rm -rf {}/*'.format(INSTALLDIR))\n        info('Setting up seafile server with %s database, use %s', db, fileserver)\n        server = ServerCtl(\n            TOPDIR,\n            SeafileServer().projectdir,\n            INSTALLDIR,\n            fileserver,\n            db=db,\n            # Use the newly built seaf-server (to avoid \"make install\" each time when developping locally)\n            seaf_server_bin=join(SeafileServer().projectdir, 'server/seaf-server')\n        )\n        server.setup()\n        with server.run():\n            info('Testing with %s database', 
db)\n            with cd(SeafileServer().projectdir):\n                shell('py.test', env=server.get_seaserv_envs())\n\n\nif __name__ == '__main__':\n    os.chdir(TOPDIR)\n    setup_logging()\n    main()\n"
  },
  {
    "path": "ci/serverctl.py",
    "content": "#!/usr/bin/env python\n#coding: UTF-8\n\nimport argparse\nimport glob\nimport logging\nimport os\nimport re\nimport sys\nfrom collections import namedtuple\nfrom contextlib import contextmanager\nfrom os.path import abspath, basename, dirname, exists, join\n\nimport requests\nfrom tenacity import TryAgain, retry, stop_after_attempt, wait_fixed\n\nfrom utils import (\n    cd, chdir, debug, green, info, mkdirs, red, setup_logging, shell, warning\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass ServerCtl(object):\n    def __init__(self, topdir, projectdir, datadir, fileserver, db='sqlite3', seaf_server_bin='seaf-server', ccnet_server_bin='ccnet-server'):\n        self.db = db\n        self.topdir = topdir\n        self.datadir = datadir\n        self.central_conf_dir = join(datadir, 'conf')\n        self.seafile_conf_dir = join(datadir, 'seafile-data')\n        self.ccnet_conf_dir = join(datadir, 'ccnet')\n\n        self.log_dir = join(datadir, 'logs')\n        mkdirs(self.log_dir)\n        self.ccnet_log = join(self.log_dir, 'ccnet.log')\n        self.seafile_log = join(self.log_dir, 'seafile.log')\n        self.fileserver_log = join(self.log_dir, 'fileserver.log')\n\n        self.ccnet_server_bin = ccnet_server_bin\n        self.seaf_server_bin = seaf_server_bin\n\n        self.sql_dir = join(topdir, 'seafile-server', 'scripts', 'sql')\n\n        self.ccnet_proc = None\n        self.seafile_proc = None\n        self.fileserver_proc = None\n        self.projectdir = projectdir\n        self.fileserver = fileserver\n\n    def setup(self):\n        if self.db == 'mysql':\n            create_mysql_dbs()\n\n        os.mkdir (self.central_conf_dir, 0o755)\n        os.mkdir (self.seafile_conf_dir, 0o755)\n        os.mkdir (self.ccnet_conf_dir, 0o755)\n\n        self.init_seafile()\n\n    def init_seafile(self):\n        seafile_conf = join(self.central_conf_dir, 'seafile.conf')\n        if self.fileserver == 'go_fileserver':\n            
seafile_fileserver_conf = '''\\\n[fileserver]\nuse_go_fileserver = true\nport=8082\n'''\n        else:\n            seafile_fileserver_conf = '''\\\n[fileserver]\nport=8082\n'''\n        with open(seafile_conf, 'a+') as fp:\n            fp.write('\\n')\n            fp.write(seafile_fileserver_conf)\n\n        if self.db == 'mysql':\n            self.add_seafile_db_conf()\n        else:\n            self.add_seafile_sqlite_db_conf()\n\n    def add_seafile_sqlite_db_conf(self):\n        seafile_conf = join(self.central_conf_dir, 'seafile.conf')\n        seafile_db_conf = '''\\\n[database]\n'''\n        with open(seafile_conf, 'a+') as fp:\n            fp.write('\\n')\n            fp.write(seafile_db_conf)\n\n    def add_seafile_db_conf(self):\n        seafile_conf = join(self.central_conf_dir, 'seafile.conf')\n        seafile_db_conf = '''\\\n[database]\ntype = mysql\nhost = 127.0.0.1\nport = 3306\nuser = seafile\npassword = seafile\ndb_name = seafile\nconnection_charset = utf8\n'''\n        with open(seafile_conf, 'a+') as fp:\n            fp.write('\\n')\n            fp.write(seafile_db_conf)\n\n    @contextmanager\n    def run(self):\n        try:\n            self.start()\n            yield self\n        except:\n            self.print_logs()\n            raise\n        finally:\n            self.stop()\n\n    def print_logs(self):\n        for logfile in self.ccnet_log, self.seafile_log:\n            if exists(logfile):\n                shell(f'cat {logfile}')\n\n    @retry(wait=wait_fixed(1), stop=stop_after_attempt(10))\n    def wait_ccnet_ready(self):\n        if not exists(join(self.ccnet_conf_dir, 'ccnet-rpc.sock')):\n            raise TryAgain\n\n    def start(self):\n        logger.info('Starting to create ccnet and seafile db tables')\n        self.create_database_tables()\n        logger.info('Starting seafile server')\n        self.start_seafile()\n        self.start_fileserver()\n\n    def create_database_tables(self):\n        if self.db == 
'mysql':\n           ccnet_sql_path = join(self.sql_dir, 'mysql', 'ccnet.sql')\n           seafile_sql_path = join(self.sql_dir, 'mysql', 'seafile.sql')\n           sql = f'USE ccnet; source {ccnet_sql_path}; USE seafile; source {seafile_sql_path};'.encode()\n           shell('sudo mysql -u root -proot', inputdata=sql, wait=False)\n        else:\n           config_sql_path = join(self.sql_dir, 'sqlite', 'config.sql')\n           groupmgr_sql_path = join(self.sql_dir, 'sqlite', 'groupmgr.sql')\n           org_sql_path = join(self.sql_dir, 'sqlite', 'org.sql')\n           user_sql_path = join(self.sql_dir, 'sqlite', 'user.sql')\n           seafile_sql_path = join(self.sql_dir, 'sqlite', 'seafile.sql')\n\n           misc_dir = join(self.ccnet_conf_dir, 'misc')\n           os.mkdir (misc_dir, 0o755)\n           groupmgr_dir = join(self.ccnet_conf_dir, 'GroupMgr')\n           os.mkdir (groupmgr_dir, 0o755)\n           orgmgr_dir = join(self.ccnet_conf_dir, 'OrgMgr')\n           os.mkdir (orgmgr_dir, 0o755)\n           usermgr_dir = join(self.ccnet_conf_dir, 'PeerMgr')\n           os.mkdir (usermgr_dir, 0o755)\n\n           config_db_path = join(misc_dir, 'config.db')\n           groupmgr_db_path = join(groupmgr_dir, 'groupmgr.db')\n           orgmgr_db_path = join(orgmgr_dir, 'orgmgr.db')\n           usermgr_db_path = join(usermgr_dir, 'usermgr.db')\n           seafile_db_path = join(self.seafile_conf_dir, 'seafile.db')\n\n           sql = f'.read {config_sql_path}'.encode()\n           shell('sqlite3 ' + config_db_path, inputdata=sql, wait=False)\n           sql = f'.read {groupmgr_sql_path}'.encode()\n           shell('sqlite3 ' + groupmgr_db_path, inputdata=sql, wait=False)\n           sql = f'.read {org_sql_path}'.encode()\n           shell('sqlite3 ' + orgmgr_db_path, inputdata=sql, wait=False)\n           sql = f'.read {user_sql_path}'.encode()\n           shell('sqlite3 ' + usermgr_db_path, inputdata=sql, wait=False)\n           sql = f'.read 
{seafile_sql_path}'.encode()\n           shell('sqlite3 ' + seafile_db_path, inputdata=sql, wait=False)\n\n    def start_ccnet(self):\n        cmd = [\n            self.ccnet_server_bin,\n            \"-F\",\n            self.central_conf_dir,\n            \"-c\",\n            self.ccnet_conf_dir,\n            \"-f\",\n            self.ccnet_log,\n        ]\n        self.ccnet_proc = shell(cmd, wait=False)\n\n    def start_seafile(self):\n        cmd = [\n            self.seaf_server_bin,\n            \"-F\",\n            self.central_conf_dir,\n            \"-c\",\n            self.ccnet_conf_dir,\n            \"-d\",\n            self.seafile_conf_dir,\n            \"-l\",\n            self.seafile_log,\n            \"-f\",\n        ]\n        self.seafile_proc = shell(cmd, wait=False)\n\n    def start_fileserver(self):\n        cmd = [\n            \"./fileserver\",\n            \"-F\",\n            self.central_conf_dir,\n            \"-d\",\n            self.seafile_conf_dir,\n            \"-l\",\n            self.fileserver_log,\n        ]\n        fileserver_path = join(self.projectdir, 'fileserver')\n        with cd(fileserver_path):\n            shell(\"go build\")\n            self.fileserver_proc = shell(cmd, wait=False)\n\n\n    def stop(self):\n        if self.ccnet_proc:\n            logger.info('Stopping ccnet server')\n            self.ccnet_proc.kill()\n        if self.seafile_proc:\n            logger.info('Stopping seafile server')\n            self.seafile_proc.kill()\n        if self.fileserver_proc:\n            logger.info('Stopping go fileserver')\n            self.fileserver_proc.kill()\n        if self.db == 'mysql':\n            del_mysql_dbs()\n\n    def get_seaserv_envs(self):\n        envs = dict(os.environ)\n        envs.update({\n            'SEAFILE_CENTRAL_CONF_DIR': self.central_conf_dir,\n            'CCNET_CONF_DIR': self.ccnet_conf_dir,\n            'SEAFILE_CONF_DIR': self.seafile_conf_dir,\n            
'SEAFILE_MYSQL_DB_CCNET_DB_NAME': 'ccnet',\n        })\n        return envs\n\n\ndef create_mysql_dbs():\n    sql = b'''\\\ncreate database `ccnet` character set = 'utf8';\ncreate database `seafile` character set = 'utf8';\n\ncreate user 'seafile'@'localhost' identified by 'seafile';\n\nGRANT ALL PRIVILEGES ON `ccnet`.* to `seafile`@localhost;\nGRANT ALL PRIVILEGES ON `seafile`.* to `seafile`@localhost;\n    '''\n\n    shell('sudo mysql -u root -proot', inputdata=sql)\n\ndef del_mysql_dbs():\n    sql = b'''\\\ndrop database `ccnet`;\ndrop database `seafile`;\ndrop user 'seafile'@'localhost';\n    '''\n\n    shell('sudo mysql -u root -proot', inputdata=sql)\n"
  },
  {
    "path": "ci/utils.py",
    "content": "#coding: UTF-8\n\nimport logging\nimport os\nimport re\nimport sys\nfrom contextlib import contextmanager\nfrom os.path import abspath, basename, exists, expanduser, join\nfrom subprocess import PIPE, CalledProcessError, Popen\n\nimport requests\nimport termcolor\n\ntry:\n    from functools import lru_cache\nexcept ImportError:\n    from backports.functools_lru_cache import lru_cache\n\nlogger = logging.getLogger(__name__)\n\n\ndef _color(s, color):\n    return s if not os.isatty(sys.stdout.fileno()) \\\n        else termcolor.colored(str(s), color)\n\n\ndef green(s):\n    return _color(s, 'green')\n\n\ndef red(s):\n    return _color(s, 'red')\n\n\ndef debug(fmt, *a):\n    logger.debug(green(fmt), *a)\n\n\ndef info(fmt, *a):\n    logger.info(green(fmt), *a)\n\n\ndef warning(fmt, *a):\n    logger.warn(red(fmt), *a)\n\n\ndef shell(cmd, inputdata=None, wait=True, **kw):\n    info('calling \"%s\" in %s', cmd, kw.get('cwd', os.getcwd()))\n    kw['shell'] = not isinstance(cmd, list)\n    kw['stdin'] = PIPE if inputdata else None\n    p = Popen(cmd, **kw)\n    if inputdata:\n        p.communicate(inputdata)\n    if wait:\n        p.wait()\n        if p.returncode:\n            raise CalledProcessError(p.returncode, cmd)\n    else:\n        return p\n\n\n@contextmanager\ndef cd(path):\n    olddir = os.getcwd()\n    os.chdir(path)\n    try:\n        yield\n    finally:\n        os.chdir(olddir)\n\n\ndef chdir(func):\n    def wrapped(self, *w, **kw):\n        with cd(self.projectdir):\n            return func(self, *w, **kw)\n\n    return wrapped\n\n\ndef setup_logging():\n    kw = {\n        'format': '[%(asctime)s][%(module)s]: %(message)s',\n        'datefmt': '%m/%d/%Y %H:%M:%S',\n        'level': logging.DEBUG,\n        'stream': sys.stdout,\n    }\n\n    logging.basicConfig(**kw)\n    logging.getLogger('requests.packages.urllib3.connectionpool'\n                      ).setLevel(logging.WARNING)\n\n\ndef mkdirs(*paths):\n    for path in paths:\n        
if not exists(path):\n            os.mkdir(path)\n\ndef on_github_actions():\n    return 'GITHUB_ACTIONS' in os.environ\n\n@contextmanager\ndef cd(path):\n    path = expanduser(path)\n    olddir = os.getcwd()\n    os.chdir(path)\n    try:\n        yield\n    finally:\n        os.chdir(olddir)\n"
  },
  {
    "path": "common/Makefile.am",
    "content": "SUBDIRS = cdc\n\nproc_headers = \\\n\t$(addprefix processors/, \\\n\tobjecttx-common.h)\n\nnoinst_HEADERS = \\\n\tdiff-simple.h \\\n\tseafile-crypt.h \\\n\tpassword-hash.h \\\n\tcommon.h \\\n\tbranch-mgr.h \\\n\tfs-mgr.h \\\n\tblock-mgr.h \\\n\tcommit-mgr.h \\\n\tlog.h \\\n\tobject-list.h \\\n\tvc-common.h \\\n\tseaf-utils.h \\\n\tobj-store.h \\\n\tobj-backend.h \\\n\tblock-backend.h \\\n\tblock.h \\\n\tmq-mgr.h \\\n\tseaf-db.h \\\n\tconfig-mgr.h \\\n\tmerge-new.h \\\n\tblock-tx-utils.h \\\n\tsync-repo-common.h \\\n\t$(proc_headers)\n"
  },
  {
    "path": "common/block-backend-fs.c",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#ifndef _WIN32_WINNT\n#define _WIN32_WINNT 0x500\n#endif\n\n#include \"common.h\"\n\n#include \"utils.h\"\n\n#include \"log.h\"\n\n#include <sys/stat.h>\n#include <fcntl.h>\n#include <dirent.h>\n\n#include \"block-backend.h\"\n#include \"obj-store.h\"\n\n\nstruct _BHandle {\n    char    *store_id;\n    int     version;\n    char    block_id[41];\n    int     fd;\n    int     rw_type;\n    char    *tmp_file;\n};\n\ntypedef struct {\n    char          *block_dir;\n    int            block_dir_len;\n    char          *tmp_dir;\n    int            tmp_dir_len;\n} FsPriv;\n\nstatic char *\nget_block_path (BlockBackend *bend,\n                const char *block_sha1,\n                char path[],\n                const char *store_id,\n                int version);\n\nstatic int\nopen_tmp_file (BlockBackend *bend,\n               const char *basename,\n               char **path);\n\nstatic BHandle *\nblock_backend_fs_open_block (BlockBackend *bend,\n                             const char *store_id,\n                             int version,\n                             const char *block_id,\n                             int rw_type)\n{\n    BHandle *handle;\n    int fd = -1;\n    char *tmp_file;\n\n    g_return_val_if_fail (block_id != NULL, NULL);\n    g_return_val_if_fail (strlen(block_id) == 40, NULL);\n    g_return_val_if_fail (rw_type == BLOCK_READ || rw_type == BLOCK_WRITE, NULL);\n\n    if (rw_type == BLOCK_READ) {\n        char path[SEAF_PATH_MAX];\n        get_block_path (bend, block_id, path, store_id, version);\n        fd = g_open (path, O_RDONLY | O_BINARY, 0);\n        if (fd < 0) {\n            ccnet_warning (\"[block bend] failed to open block %s for read: %s\\n\",\n                           block_id, strerror(errno));\n            return NULL;\n        }\n    } else {\n        fd = open_tmp_file (bend, block_id, &tmp_file);\n        if (fd < 0) {\n         
   ccnet_warning (\"[block bend] failed to open block %s for write: %s\\n\",\n                           block_id, strerror(errno));\n            return NULL;\n        }\n    }\n\n    handle = g_new0(BHandle, 1);\n    handle->fd = fd;\n    memcpy (handle->block_id, block_id, 41);\n    handle->rw_type = rw_type;\n    if (rw_type == BLOCK_WRITE)\n        handle->tmp_file = tmp_file;\n    if (store_id)\n        handle->store_id = g_strdup(store_id);\n    handle->version = version;\n\n    return handle;\n}\n\nstatic int\nblock_backend_fs_read_block (BlockBackend *bend,\n                             BHandle *handle,\n                             void *buf, int len)\n{\n    int ret;\n\n    ret = readn (handle->fd, buf, len);\n    if (ret < 0)\n        seaf_warning (\"Failed to read block %s:%s: %s.\\n\",\n                      handle->store_id, handle->block_id, strerror (errno));\n\n    return ret;\n}\n\nstatic int\nblock_backend_fs_write_block (BlockBackend *bend,\n                                BHandle *handle,\n                                const void *buf, int len)\n{\n    int ret;\n\n    ret = writen (handle->fd, buf, len);\n    if (ret < 0)\n        seaf_warning (\"Failed to write block %s:%s: %s.\\n\",\n                      handle->store_id, handle->block_id, strerror (errno));\n\n    return ret;\n}\n\nstatic int\nblock_backend_fs_close_block (BlockBackend *bend,\n                                BHandle *handle)\n{\n    int ret;\n\n    ret = close (handle->fd);\n\n    return ret;\n}\n\nstatic void\nblock_backend_fs_block_handle_free (BlockBackend *bend,\n                                    BHandle *handle)\n{\n    if (handle->rw_type == BLOCK_WRITE) {\n        /* make sure the tmp file is removed even on failure. 
*/\n        g_unlink (handle->tmp_file);\n        g_free (handle->tmp_file);\n    }\n    g_free (handle->store_id);\n    g_free (handle);\n}\n\nstatic int\ncreate_parent_path (const char *path)\n{\n    char *dir = g_path_get_dirname (path);\n    if (!dir)\n        return -1;\n\n    if (g_file_test (dir, G_FILE_TEST_EXISTS)) {\n        g_free (dir);\n        return 0;\n    }\n\n    if (g_mkdir_with_parents (dir, 0777) < 0) {\n        seaf_warning (\"Failed to create object parent path: %s.\\n\", dir);\n        g_free (dir);\n        return -1;\n    }\n\n    g_free (dir);\n    return 0;\n}\n\nstatic int\nblock_backend_fs_commit_block (BlockBackend *bend,\n                               BHandle *handle)\n{\n    char path[SEAF_PATH_MAX];\n\n    g_return_val_if_fail (handle->rw_type == BLOCK_WRITE, -1);\n\n    get_block_path (bend, handle->block_id, path, handle->store_id, handle->version);\n\n    if (create_parent_path (path) < 0) {\n        seaf_warning (\"Failed to create path for block %s:%s.\\n\",\n                      handle->store_id, handle->block_id);\n        return -1;\n    }\n\n    if (g_rename (handle->tmp_file, path) < 0) {\n        seaf_warning (\"[block bend] failed to commit block %s:%s: %s\\n\",\n                      handle->store_id, handle->block_id, strerror(errno));\n        return -1;\n    }\n\n    return 0;\n}\n    \nstatic gboolean\nblock_backend_fs_block_exists (BlockBackend *bend,\n                               const char *store_id,\n                               int version,\n                               const char *block_sha1)\n{\n    char block_path[SEAF_PATH_MAX];\n\n    get_block_path (bend, block_sha1, block_path, store_id, version);\n    if (g_access (block_path, F_OK) == 0)\n        return TRUE;\n    else\n        return FALSE;\n}\n\nstatic int\nblock_backend_fs_remove_block (BlockBackend *bend,\n                               const char *store_id,\n                               int version,\n                               const 
char *block_id)\n{\n    char path[SEAF_PATH_MAX];\n\n    get_block_path (bend, block_id, path, store_id, version);\n\n    return g_unlink (path);\n}\n\nstatic BMetadata *\nblock_backend_fs_stat_block (BlockBackend *bend,\n                             const char *store_id,\n                             int version,\n                             const char *block_id)\n{\n    char path[SEAF_PATH_MAX];\n    SeafStat st;\n    BMetadata *block_md;\n\n    get_block_path (bend, block_id, path, store_id, version);\n    if (seaf_stat (path, &st) < 0) {\n        seaf_warning (\"[block bend] Failed to stat block %s:%s at %s: %s.\\n\",\n                      store_id, block_id, path, strerror(errno));\n        return NULL;\n    }\n    block_md = g_new0(BMetadata, 1);\n    memcpy (block_md->id, block_id, 40);\n    block_md->size = (uint32_t) st.st_size;\n\n    return block_md;\n}\n\nstatic BMetadata *\nblock_backend_fs_stat_block_by_handle (BlockBackend *bend,\n                                       BHandle *handle)\n{\n    SeafStat st;\n    BMetadata *block_md;\n\n    if (seaf_fstat (handle->fd, &st) < 0) {\n        seaf_warning (\"[block bend] Failed to stat block %s:%s.\\n\",\n                      handle->store_id, handle->block_id);\n        return NULL;\n    }\n    block_md = g_new0(BMetadata, 1);\n    memcpy (block_md->id, handle->block_id, 40);\n    block_md->size = (uint32_t) st.st_size;\n\n    return block_md;\n}\n\nstatic int\nblock_backend_fs_foreach_block (BlockBackend *bend,\n                                const char *store_id,\n                                int version,\n                                SeafBlockFunc process,\n                                void *user_data)\n{\n    FsPriv *priv = bend->be_priv;\n    char *block_dir = NULL;\n    int dir_len;\n    GDir *dir1 = NULL, *dir2;\n    const char *dname1, *dname2;\n    char block_id[128];\n    char path[SEAF_PATH_MAX], *pos;\n    int ret = 0;\n\n#if defined MIGRATION\n    if (version > 0)\n        
block_dir = g_build_filename (priv->block_dir, store_id, NULL);\n#else\n    block_dir = g_build_filename (priv->block_dir, store_id, NULL);\n#endif\n    dir_len = strlen (block_dir);\n\n    dir1 = g_dir_open (block_dir, 0, NULL);\n    if (!dir1) {\n        goto out;\n    }\n\n    memcpy (path, block_dir, dir_len);\n    pos = path + dir_len;\n\n    while ((dname1 = g_dir_read_name(dir1)) != NULL) {\n        snprintf (pos, sizeof(path) - dir_len, \"/%s\", dname1);\n\n        dir2 = g_dir_open (path, 0, NULL);\n        if (!dir2) {\n            seaf_warning (\"Failed to open block dir %s.\\n\", path);\n            continue;\n        }\n\n        while ((dname2 = g_dir_read_name(dir2)) != NULL) {\n            snprintf (block_id, sizeof(block_id), \"%s%s\", dname1, dname2);\n            if (!process (store_id, version, block_id, user_data)) {\n                g_dir_close (dir2);\n                goto out;\n            }\n        }\n        g_dir_close (dir2);\n    }\n\nout:\n    if (dir1)\n        g_dir_close (dir1);\n    g_free (block_dir);\n\n    return ret;\n}\n\nstatic int\nblock_backend_fs_copy (BlockBackend *bend,\n                       const char *src_store_id,\n                       int src_version,\n                       const char *dst_store_id,\n                       int dst_version,\n                       const char *block_id)\n{\n    char src_path[SEAF_PATH_MAX];\n    char dst_path[SEAF_PATH_MAX];\n\n    get_block_path (bend, block_id, src_path, src_store_id, src_version);\n    get_block_path (bend, block_id, dst_path, dst_store_id, dst_version);\n\n    if (g_file_test (dst_path, G_FILE_TEST_EXISTS))\n        return 0;\n\n    if (create_parent_path (dst_path) < 0) {\n        seaf_warning (\"Failed to create dst path %s for block %s.\\n\",\n                      dst_path, block_id);\n        return -1;\n    }\n\n#ifdef WIN32\n    if (!CreateHardLink (dst_path, src_path, NULL)) {\n        seaf_warning (\"Failed to link %s to %s: %lu.\\n\",\n              
        src_path, dst_path, GetLastError());\n        return -1;\n    }\n    return 0;\n#else\n    int ret = link (src_path, dst_path);\n    if (ret < 0 && errno != EEXIST) {\n        seaf_warning (\"Failed to link %s to %s: %s.\\n\",\n                      src_path, dst_path, strerror(errno));\n        return -1;\n    }\n    return ret;\n#endif\n}\n\nstatic int\nblock_backend_fs_remove_store (BlockBackend *bend, const char *store_id)\n{\n    FsPriv *priv = bend->be_priv;\n    char *block_dir = NULL;\n    GDir *dir1, *dir2;\n    const char *dname1, *dname2;\n    char *path1, *path2;\n\n    block_dir = g_build_filename (priv->block_dir, store_id, NULL);\n\n    dir1 = g_dir_open (block_dir, 0, NULL);\n    if (!dir1) {\n        g_free (block_dir);\n        return 0;\n    }\n\n    while ((dname1 = g_dir_read_name(dir1)) != NULL) {\n        path1 = g_build_filename (block_dir, dname1, NULL);\n\n        dir2 = g_dir_open (path1, 0, NULL);\n        if (!dir2) {\n            seaf_warning (\"Failed to open block dir %s.\\n\", path1);\n            g_dir_close (dir1);\n            g_free (path1);\n            g_free (block_dir);\n            return -1;\n        }\n\n        while ((dname2 = g_dir_read_name(dir2)) != NULL) {\n            path2 = g_build_filename (path1, dname2, NULL);\n            g_unlink (path2);\n            g_free (path2);\n        }\n        g_dir_close (dir2);\n\n        g_rmdir (path1);\n        g_free (path1);\n    }\n\n    g_dir_close (dir1);\n    g_rmdir (block_dir);\n    g_free (block_dir);\n\n    return 0;\n}\n\nstatic char *\nget_block_path (BlockBackend *bend,\n                const char *block_sha1,\n                char path[],\n                const char *store_id,\n                int version)\n{\n    FsPriv *priv = bend->be_priv;\n    char *pos = path;\n    int n;\n\n#if defined MIGRATION\n    if (version > 0) {\n        n = snprintf (path, SEAF_PATH_MAX, \"%s/%s/\", priv->block_dir, store_id);\n        pos += n;\n    } else\n#else\n    n = 
snprintf (path, SEAF_PATH_MAX, \"%s/%s/\", priv->block_dir, store_id);\n    pos += n;\n#endif\n\n    memcpy (pos, block_sha1, 2);\n    pos[2] = '/';\n    pos += 3;\n\n    memcpy (pos, block_sha1 + 2, 41 - 2);\n\n    return path;\n}\n\nstatic int\nopen_tmp_file (BlockBackend *bend,\n               const char *basename,\n               char **path)\n{\n    FsPriv *priv = bend->be_priv;\n    int fd;\n\n    *path = g_strdup_printf (\"%s/%s.XXXXXX\", priv->tmp_dir, basename);\n    fd = g_mkstemp (*path);\n    if (fd < 0)\n        g_free (*path);\n\n    return fd;\n}\n\nBlockBackend *\nblock_backend_fs_new (const char *seaf_dir, const char *tmp_dir)\n{\n    BlockBackend *bend;\n    FsPriv *priv;\n\n    bend = g_new0(BlockBackend, 1);\n    priv = g_new0(FsPriv, 1);\n    bend->be_priv = priv;\n\n    priv->block_dir = g_build_filename (seaf_dir, \"storage\", \"blocks\", NULL);\n    priv->block_dir_len = strlen (priv->block_dir);\n\n    priv->tmp_dir = g_strdup (tmp_dir);\n    priv->tmp_dir_len = strlen (tmp_dir);\n\n    if (g_mkdir_with_parents (priv->block_dir, 0777) < 0) {\n        seaf_warning (\"Block dir %s does not exist and\"\n                   \" is unable to create\\n\", priv->block_dir);\n        goto onerror;\n    }\n\n    if (g_mkdir_with_parents (tmp_dir, 0777) < 0) {\n        seaf_warning (\"Blocks tmp dir %s does not exist and\"\n                   \" is unable to create\\n\", tmp_dir);\n        goto onerror;\n    }\n\n    bend->open_block = block_backend_fs_open_block;\n    bend->read_block = block_backend_fs_read_block;\n    bend->write_block = block_backend_fs_write_block;\n    bend->commit_block = block_backend_fs_commit_block;\n    bend->close_block = block_backend_fs_close_block;\n    bend->exists = block_backend_fs_block_exists;\n    bend->remove_block = block_backend_fs_remove_block;\n    bend->stat_block = block_backend_fs_stat_block;\n    bend->stat_block_by_handle = block_backend_fs_stat_block_by_handle;\n    bend->block_handle_free = 
block_backend_fs_block_handle_free;\n    bend->foreach_block = block_backend_fs_foreach_block;\n    bend->remove_store = block_backend_fs_remove_store;\n    bend->copy = block_backend_fs_copy;\n\n    return bend;\n\nonerror:\n    g_free (bend->be_priv);\n    g_free (bend);\n\n    return NULL;\n}\n"
  },
  {
    "path": "common/block-backend.c",
    "content": "\n#include \"common.h\"\n\n#include \"log.h\"\n\n#include \"block-backend.h\"\n\nextern BlockBackend *\nblock_backend_fs_new (const char *block_dir, const char *tmp_dir);\n\nBlockBackend*\nload_filesystem_block_backend(GKeyFile *config)\n{\n    BlockBackend *bend;\n    char *tmp_dir;\n    char *block_dir;\n    \n    block_dir = g_key_file_get_string (config, \"block_backend\", \"block_dir\", NULL);\n    if (!block_dir) {\n        seaf_warning (\"Block dir not set in config.\\n\");\n        return NULL;\n    }\n\n    tmp_dir = g_key_file_get_string (config, \"block_backend\", \"tmp_dir\", NULL);\n    if (!tmp_dir) {\n        seaf_warning (\"Block tmp dir not set in config.\\n\");\n        return NULL;\n    }\n\n    bend = block_backend_fs_new (block_dir, tmp_dir);\n\n    g_free (block_dir);\n    g_free (tmp_dir);\n    return bend;\n}\n\nBlockBackend*\nload_block_backend (GKeyFile *config)\n{\n    char *backend;\n    BlockBackend *bend;\n\n    backend = g_key_file_get_string (config, \"block_backend\", \"name\", NULL);\n    if (!backend) {\n        return NULL;\n    }\n\n    if (strcmp(backend, \"filesystem\") == 0) {\n        bend = load_filesystem_block_backend(config);\n        g_free (backend);\n        return bend;\n    }\n\n    seaf_warning (\"Unknown backend\\n\");\n    return NULL;\n}\n"
  },
  {
    "path": "common/block-backend.h",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#ifndef BLOCK_BACKEND_H\n#define BLOCK_BACKEND_H\n\n#include \"block.h\"\n\ntypedef struct BlockBackend BlockBackend;\n\nstruct BlockBackend {\n    \n    BHandle* (*open_block) (BlockBackend *bend,\n                            const char *store_id, int version,\n                            const char *block_id, int rw_type);\n\n    int      (*read_block) (BlockBackend *bend, BHandle *handle, void *buf, int len);\n    \n    int      (*write_block) (BlockBackend *bend, BHandle *handle, const void *buf, int len);\n    \n    int      (*commit_block) (BlockBackend *bend, BHandle *handle);\n\n    int      (*close_block) (BlockBackend *bend, BHandle *handle);\n\n    int      (*exists) (BlockBackend *bend,\n                        const char *store_id, int version,\n                        const char *block_id);\n\n    int      (*remove_block) (BlockBackend *bend,\n                              const char *store_id, int version,\n                              const char *block_id);\n\n    BMetadata* (*stat_block) (BlockBackend *bend,\n                              const char *store_id, int version,\n                              const char *block_id);\n    \n    BMetadata* (*stat_block_by_handle) (BlockBackend *bend, BHandle *handle);\n\n    void     (*block_handle_free) (BlockBackend *bend, BHandle *handle);\n\n    int      (*foreach_block) (BlockBackend *bend,\n                               const char *store_id,\n                               int version,\n                               SeafBlockFunc process,\n                               void *user_data);\n\n    int         (*copy) (BlockBackend *bend,\n                         const char *src_store_id,\n                         int src_version,\n                         const char *dst_store_id,\n                         int dst_version,\n                         const char *block_id);\n\n    /* Only valid for version 1 
repo. Remove all blocks for the repo. */\n    int      (*remove_store) (BlockBackend *bend,\n                              const char *store_id);\n\n    void*    be_priv;           /* backend private field */\n\n};\n\n\nBlockBackend* load_block_backend (GKeyFile *config);\n\n#endif\n"
  },
  {
    "path": "common/block-mgr.c",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#include \"common.h\"\n\n#include \"seafile-session.h\"\n#include \"utils.h\"\n#include \"seaf-utils.h\"\n#include \"block-mgr.h\"\n#include \"log.h\"\n\n#include <stdio.h>\n#include <errno.h>\n#include <unistd.h>\n#include <sys/stat.h>\n#include <fcntl.h>\n#include <sys/types.h>\n#include <dirent.h>\n#include <glib/gstdio.h>\n\n#include \"block-backend.h\"\n\n#define SEAF_BLOCK_DIR \"blocks\"\n\n\nextern BlockBackend *\nblock_backend_fs_new (const char *block_dir, const char *tmp_dir);\n\n\nSeafBlockManager *\nseaf_block_manager_new (struct _SeafileSession *seaf,\n                        const char *seaf_dir)\n{\n    SeafBlockManager *mgr;\n\n    mgr = g_new0 (SeafBlockManager, 1);\n    mgr->seaf = seaf;\n\n    mgr->backend = block_backend_fs_new (seaf_dir, seaf->tmp_file_dir);\n    if (!mgr->backend) {\n        seaf_warning (\"[Block mgr] Failed to load backend.\\n\");\n        goto onerror;\n    }\n\n    return mgr;\n\nonerror:\n    g_free (mgr);\n\n    return NULL;\n}\n\nint\nseaf_block_manager_init (SeafBlockManager *mgr)\n{\n    return 0;\n}\n\n\nBlockHandle *\nseaf_block_manager_open_block (SeafBlockManager *mgr,\n                               const char *store_id,\n                               int version,\n                               const char *block_id,\n                               int rw_type)\n{\n    if (!store_id || !is_uuid_valid(store_id) ||\n        !block_id || !is_object_id_valid(block_id))\n        return NULL;\n\n    return mgr->backend->open_block (mgr->backend,\n                                     store_id, version,\n                                     block_id, rw_type);\n}\n\nint\nseaf_block_manager_read_block (SeafBlockManager *mgr,\n                               BlockHandle *handle,\n                               void *buf, int len)\n{\n    return mgr->backend->read_block (mgr->backend, handle, buf, 
len);\n}\n\nint\nseaf_block_manager_write_block (SeafBlockManager *mgr,\n                                BlockHandle *handle,\n                                const void *buf, int len)\n{\n    return mgr->backend->write_block (mgr->backend, handle, buf, len);\n}\n\nint\nseaf_block_manager_close_block (SeafBlockManager *mgr,\n                                BlockHandle *handle)\n{\n    return mgr->backend->close_block (mgr->backend, handle);\n}\n\nvoid\nseaf_block_manager_block_handle_free (SeafBlockManager *mgr,\n                                      BlockHandle *handle)\n{\n    return mgr->backend->block_handle_free (mgr->backend, handle);\n}\n\nint\nseaf_block_manager_commit_block (SeafBlockManager *mgr,\n                                 BlockHandle *handle)\n{\n    return mgr->backend->commit_block (mgr->backend, handle);\n}\n    \ngboolean seaf_block_manager_block_exists (SeafBlockManager *mgr,\n                                          const char *store_id,\n                                          int version,\n                                          const char *block_id)\n{\n    if (!store_id || !is_uuid_valid(store_id) ||\n        !block_id || !is_object_id_valid(block_id))\n        return FALSE;\n\n    return mgr->backend->exists (mgr->backend, store_id, version, block_id);\n}\n\nint\nseaf_block_manager_remove_block (SeafBlockManager *mgr,\n                                 const char *store_id,\n                                 int version,\n                                 const char *block_id)\n{\n    if (!store_id || !is_uuid_valid(store_id) ||\n        !block_id || !is_object_id_valid(block_id))\n        return -1;\n\n    return mgr->backend->remove_block (mgr->backend, store_id, version, block_id);\n}\n\nBlockMetadata *\nseaf_block_manager_stat_block (SeafBlockManager *mgr,\n                               const char *store_id,\n                               int version,\n                               const char *block_id)\n{\n    if (!store_id || 
!is_uuid_valid(store_id) ||\n        !block_id || !is_object_id_valid(block_id))\n        return NULL;\n\n    return mgr->backend->stat_block (mgr->backend, store_id, version, block_id);\n}\n\nBlockMetadata *\nseaf_block_manager_stat_block_by_handle (SeafBlockManager *mgr,\n                                         BlockHandle *handle)\n{\n    return mgr->backend->stat_block_by_handle (mgr->backend, handle);\n}\n\nint\nseaf_block_manager_foreach_block (SeafBlockManager *mgr,\n                                  const char *store_id,\n                                  int version,\n                                  SeafBlockFunc process,\n                                  void *user_data)\n{\n    return mgr->backend->foreach_block (mgr->backend,\n                                        store_id, version,\n                                        process, user_data);\n}\n\nint\nseaf_block_manager_copy_block (SeafBlockManager *mgr,\n                               const char *src_store_id,\n                               int src_version,\n                               const char *dst_store_id,\n                               int dst_version,\n                               const char *block_id)\n{\n    if (strcmp (block_id, EMPTY_SHA1) == 0)\n        return 0;\n    if (seaf_block_manager_block_exists (mgr, dst_store_id, dst_version, block_id)) {\n        return 0;\n    }\n\n    return mgr->backend->copy (mgr->backend,\n                               src_store_id,\n                               src_version,\n                               dst_store_id,\n                               dst_version,\n                               block_id);\n}\n\nstatic gboolean\nget_block_number (const char *store_id,\n                  int version,\n                  const char *block_id,\n                  void *data)\n{\n    guint64 *n_blocks = data;\n\n    ++(*n_blocks);\n\n    return TRUE;\n}\n\nguint64\nseaf_block_manager_get_block_number (SeafBlockManager *mgr,\n                     
                const char *store_id,\n                                     int version)\n{\n    guint64 n_blocks = 0;\n\n    seaf_block_manager_foreach_block (mgr, store_id, version,\n                                      get_block_number, &n_blocks);\n\n    return n_blocks;\n}\n\ngboolean\nseaf_block_manager_verify_block (SeafBlockManager *mgr,\n                                 const char *store_id,\n                                 int version,\n                                 const char *block_id,\n                                 gboolean *io_error)\n{\n    BlockHandle *h;\n    char buf[10240];\n    int n;\n    SHA_CTX ctx;\n    guint8 sha1[20];\n    char check_id[41];\n\n    h = seaf_block_manager_open_block (mgr,\n                                       store_id, version,\n                                       block_id, BLOCK_READ);\n    if (!h) {\n        seaf_warning (\"Failed to open block %s:%.8s.\\n\", store_id, block_id);\n        *io_error = TRUE;\n        return FALSE;\n    }\n\n    SHA1_Init (&ctx);\n    while (1) {\n        n = seaf_block_manager_read_block (mgr, h, buf, sizeof(buf));\n        if (n < 0) {\n            seaf_warning (\"Failed to read block %s:%.8s.\\n\", store_id, block_id);\n            *io_error = TRUE;\n            return FALSE;\n        }\n        if (n == 0)\n            break;\n\n        SHA1_Update (&ctx, buf, n);\n    }\n\n    seaf_block_manager_close_block (mgr, h);\n    seaf_block_manager_block_handle_free (mgr, h);\n\n    SHA1_Final (sha1, &ctx);\n    rawdata_to_hex (sha1, check_id, 20);\n\n    if (strcmp (check_id, block_id) == 0)\n        return TRUE;\n    else\n        return FALSE;\n}\n\nint\nseaf_block_manager_remove_store (SeafBlockManager *mgr,\n                                 const char *store_id)\n{\n    return mgr->backend->remove_store (mgr->backend, store_id);\n}\n"
  },
  {
    "path": "common/block-mgr.h",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#ifndef SEAF_BLOCK_MGR_H\n#define SEAF_BLOCK_MGR_H\n\n#include <glib.h>\n#include <glib-object.h>\n#include <stdint.h>\n\n#include \"block.h\"\n\nstruct _SeafileSession;\n\ntypedef struct _SeafBlockManager SeafBlockManager;\n\nstruct _SeafBlockManager {\n    struct _SeafileSession *seaf;\n\n    struct BlockBackend *backend;\n};\n\n\nSeafBlockManager *\nseaf_block_manager_new (struct _SeafileSession *seaf,\n                        const char *seaf_dir);\n\n/*\n * Open a block for read or write.\n *\n * @store_id: id for the block store\n * @version: data format version for the repo\n * @block_id: ID of block.\n * @rw_type: BLOCK_READ or BLOCK_WRITE.\n * Returns: A handle for the block.\n */\nBlockHandle *\nseaf_block_manager_open_block (SeafBlockManager *mgr,\n                               const char *store_id,\n                               int version,\n                               const char *block_id,\n                               int rw_type);\n\n/*\n * Read data from a block.\n * The semantics is similar to readn.\n *\n * @handle: Hanlde returned by seaf_block_manager_open_block().\n * @buf: Data wuold be copied into this buf.\n * @len: At most @len bytes would be read.\n *\n * Returns: the bytes read.\n */\nint\nseaf_block_manager_read_block (SeafBlockManager *mgr,\n                               BlockHandle *handle,\n                               void *buf, int len);\n\n/*\n * Write data to a block.\n * The semantics is similar to writen.\n *\n * @handle: Hanlde returned by seaf_block_manager_open_block().\n * @buf: Data to be written to the block.\n * @len: At most @len bytes would be written.\n *\n * Returns: the bytes written.\n */\nint\nseaf_block_manager_write_block (SeafBlockManager *mgr,\n                                BlockHandle *handle,\n                                const void *buf, int len);\n\n/*\n * Commit a block to storage.\n * The block 
must be opened for write.\n *\n * @handle: Hanlde returned by seaf_block_manager_open_block().\n *\n * Returns: 0 on success, -1 on error.\n */\nint\nseaf_block_manager_commit_block (SeafBlockManager *mgr,\n                                 BlockHandle *handle);\n\n/*\n * Close an open block.\n *\n * @handle: Hanlde returned by seaf_block_manager_open_block().\n *\n * Returns: 0 on success, -1 on error.\n */\nint\nseaf_block_manager_close_block (SeafBlockManager *mgr,\n                                BlockHandle *handle);\n\nvoid\nseaf_block_manager_block_handle_free (SeafBlockManager *mgr,\n                                      BlockHandle *handle);\n\ngboolean \nseaf_block_manager_block_exists (SeafBlockManager *mgr,\n                                 const char *store_id,\n                                 int version,\n                                 const char *block_id);\n\nint\nseaf_block_manager_remove_block (SeafBlockManager *mgr,\n                                 const char *store_id,\n                                 int version,\n                                 const char *block_id);\n\nBlockMetadata *\nseaf_block_manager_stat_block (SeafBlockManager *mgr,\n                               const char *store_id,\n                               int version,\n                               const char *block_id);\n\nBlockMetadata *\nseaf_block_manager_stat_block_by_handle (SeafBlockManager *mgr,\n                                         BlockHandle *handle);\n\nint\nseaf_block_manager_foreach_block (SeafBlockManager *mgr,\n                                  const char *store_id,\n                                  int version,\n                                  SeafBlockFunc process,\n                                  void *user_data);\n\nint\nseaf_block_manager_copy_block (SeafBlockManager *mgr,\n                               const char *src_store_id,\n                               int src_version,\n                               const char *dst_store_id,\n   
                            int dst_version,\n                               const char *block_id);\n\n/* Remove all blocks for a repo. Only valid for version 1 repo. */\nint\nseaf_block_manager_remove_store (SeafBlockManager *mgr,\n                                 const char *store_id);\n\nguint64\nseaf_block_manager_get_block_number (SeafBlockManager *mgr,\n                                     const char *store_id,\n                                     int version);\n\ngboolean\nseaf_block_manager_verify_block (SeafBlockManager *mgr,\n                                 const char *store_id,\n                                 int version,\n                                 const char *block_id,\n                                 gboolean *io_error);\n\n#endif\n"
  },
  {
    "path": "common/block-tx-utils.c",
    "content": "#include \"common.h\"\n#define DEBUG_FLAG SEAFILE_DEBUG_TRANSFER\n#include \"log.h\"\n\n#include \"utils.h\"\n#include \"block-tx-utils.h\"\n\n/* Utility functions for block transfer protocol. */\n\n/* Encryption related functions. */\n\nvoid\nblocktx_generate_encrypt_key (unsigned char *session_key, int sk_len,\n                              unsigned char *key, unsigned char *iv)\n{\n    EVP_BytesToKey (EVP_aes_256_cbc(), /* cipher mode */\n                    EVP_sha1(),        /* message digest */\n                    NULL,              /* salt */\n                    session_key,\n                    sk_len,\n                    3,   /* iteration times */\n                    key, /* the derived key */\n                    iv); /* IV, initial vector */\n}\n\nint\nblocktx_encrypt_init (EVP_CIPHER_CTX **ctx,\n                      const unsigned char *key,\n                      const unsigned char *iv)\n{\n    int ret;\n\n    /* Prepare CTX for encryption. */\n    *ctx = EVP_CIPHER_CTX_new ();\n\n    ret = EVP_EncryptInit_ex (*ctx,\n                              EVP_aes_256_cbc(), /* cipher mode */\n                              NULL, /* engine, NULL for default */\n                              key,  /* derived key */\n                              iv);  /* initial vector */\n    if (ret == 0)\n        return -1;\n\n    return 0;\n}\n\nint\nblocktx_decrypt_init (EVP_CIPHER_CTX **ctx,\n                      const unsigned char *key,\n                      const unsigned char *iv)\n{\n    int ret;\n\n    /* Prepare CTX for decryption. 
*/\n    *ctx = EVP_CIPHER_CTX_new();\n\n    ret = EVP_DecryptInit_ex (*ctx,\n                              EVP_aes_256_cbc(), /* cipher mode */\n                              NULL, /* engine, NULL for default */\n                              key,  /* derived key */\n                              iv);  /* initial vector */\n    if (ret == 0)\n        return -1;\n\n    return 0;\n}\n\n/* Sending frame */\n\nint\nsend_encrypted_data_frame_begin (evutil_socket_t data_fd,\n                                 int frame_len)\n{\n    /* Compute data size after encryption.\n     * Block size is 16 bytes and AES always add one padding block.\n     */\n    int enc_frame_len;\n\n    enc_frame_len = ((frame_len >> 4) + 1) << 4;\n    enc_frame_len = htonl (enc_frame_len);\n\n    if (sendn (data_fd, &enc_frame_len, sizeof(int)) < 0) {\n        seaf_warning (\"Failed to send frame length: %s.\\n\",\n                      evutil_socket_error_to_string(evutil_socket_geterror(data_fd)));\n        return -1;\n    }\n\n    return 0;\n}\n\nint\nsend_encrypted_data (EVP_CIPHER_CTX *ctx,\n                     evutil_socket_t data_fd,\n                     const void *buf, int len)\n{\n    char out_buf[len + ENC_BLOCK_SIZE];\n    int out_len;\n\n    if (EVP_EncryptUpdate (ctx,\n                           (unsigned char *)out_buf, &out_len,\n                           (unsigned char *)buf, len) == 0) {\n        seaf_warning (\"Failed to encrypt data.\\n\");\n        return -1;\n    }\n\n    if (sendn (data_fd, out_buf, out_len) < 0) {\n        seaf_warning (\"Failed to write data: %s.\\n\",\n                      evutil_socket_error_to_string(evutil_socket_geterror(data_fd)));\n        return -1;\n    }\n\n    return 0;\n}\n\nint\nsend_encrypted_data_frame_end (EVP_CIPHER_CTX *ctx,\n                               evutil_socket_t data_fd)\n{\n    char out_buf[ENC_BLOCK_SIZE];\n    int out_len;\n\n    if (EVP_EncryptFinal_ex (ctx, (unsigned char *)out_buf, &out_len) == 0) {\n        
seaf_warning (\"Failed to encrypt data.\\n\");\n        return -1;\n    }\n    if (sendn (data_fd, out_buf, out_len) < 0) {\n        seaf_warning (\"Failed to write data: %s.\\n\",\n                      evutil_socket_error_to_string(evutil_socket_geterror(data_fd)));\n        return -1;\n    }\n\n    return 0;\n}\n\n/* Receiving frame */\n\nstatic int\nhandle_frame_content (struct evbuffer *buf, FrameParser *parser)\n{\n    char *frame;\n    EVP_CIPHER_CTX *ctx;\n    char *out;\n    int outlen, outlen2;\n    int ret = 0;\n\n    struct evbuffer *input = buf;\n\n    if (evbuffer_get_length (input) < parser->enc_frame_len)\n        return 0;\n\n    if (parser->version == 1)\n        blocktx_decrypt_init (&ctx, parser->key, parser->iv);\n    else if (parser->version == 2)\n        blocktx_decrypt_init (&ctx, parser->key_v2, parser->iv_v2);\n\n    frame = g_malloc (parser->enc_frame_len);\n    out = g_malloc (parser->enc_frame_len + ENC_BLOCK_SIZE);\n\n    evbuffer_remove (input, frame, parser->enc_frame_len);\n\n    if (EVP_DecryptUpdate (ctx,\n                           (unsigned char *)out, &outlen,\n                           (unsigned char *)frame,\n                           parser->enc_frame_len) == 0) {\n        seaf_warning (\"Failed to decrypt frame content.\\n\");\n        ret = -1;\n        goto out;\n    }\n\n    if (EVP_DecryptFinal_ex (ctx, (unsigned char *)(out + outlen), &outlen2) == 0)\n    {\n        seaf_warning (\"Failed to decrypt frame content.\\n\");\n        ret = -1;\n        goto out;\n    }\n\n    ret = parser->content_cb (out, outlen + outlen2, parser->cbarg);\n\nout:\n    g_free (frame);\n    g_free (out);\n    parser->enc_frame_len = 0;\n    EVP_CIPHER_CTX_free (ctx);\n    return ret;\n}\n\nint\nhandle_one_frame (struct evbuffer *buf, FrameParser *parser)\n{\n    struct evbuffer *input = buf;\n\n    if (!parser->enc_frame_len) {\n        /* Read the length of the encrypted frame first. 
*/\n        if (evbuffer_get_length (input) < sizeof(int))\n            return 0;\n\n        int frame_len;\n        evbuffer_remove (input, &frame_len, sizeof(int));\n        parser->enc_frame_len = ntohl (frame_len);\n\n        if (evbuffer_get_length (input) > 0)\n            return handle_frame_content (buf, parser);\n\n        return 0;\n    } else {\n        return handle_frame_content (buf, parser);\n    }\n}\n\nstatic int\nhandle_frame_fragment_content (struct evbuffer *buf, FrameParser *parser)\n{\n    char *fragment = NULL, *out = NULL;\n    int fragment_len, outlen;\n    int ret = 0;\n\n    struct evbuffer *input = buf;\n\n    fragment_len = evbuffer_get_length (input);\n    fragment = g_malloc (fragment_len);\n    evbuffer_remove (input, fragment, fragment_len);\n\n    out = g_malloc (fragment_len + ENC_BLOCK_SIZE);\n\n    if (EVP_DecryptUpdate (parser->ctx,\n                           (unsigned char *)out, &outlen,\n                           (unsigned char *)fragment, fragment_len) == 0) {\n        seaf_warning (\"Failed to decrypt frame fragment.\\n\");\n        ret = -1;\n        goto out;\n    }\n\n    ret = parser->fragment_cb (out, outlen, 0, parser->cbarg);\n    if (ret < 0)\n        goto out;\n\n    parser->remain -= fragment_len;\n\n    if (parser->remain <= 0) {\n        if (EVP_DecryptFinal_ex (parser->ctx,\n                                 (unsigned char *)out,\n                                 &outlen) == 0) {\n            seaf_warning (\"Failed to decrypt frame fragment.\\n\");\n            ret = -1;\n            goto out;\n        }\n\n        ret = parser->fragment_cb (out, outlen, 1, parser->cbarg);\n        if (ret < 0)\n            goto out;\n\n        EVP_CIPHER_CTX_free (parser->ctx);\n        parser->enc_init = FALSE;\n        parser->enc_frame_len = 0;\n    }\n\nout:\n    g_free (fragment);\n    g_free (out);\n    if (ret < 0) {\n        EVP_CIPHER_CTX_free (parser->ctx);\n        parser->enc_init = FALSE;\n        
parser->enc_frame_len = 0;\n    }\n    return ret;\n}\n\nint\nhandle_frame_fragments (struct evbuffer *buf, FrameParser *parser)\n{\n    struct evbuffer *input = buf;\n\n    if (!parser->enc_frame_len) {\n        /* Read the length of the encrypted frame first. */\n        if (evbuffer_get_length (input) < sizeof(int))\n            return 0;\n\n        int frame_len;\n        evbuffer_remove (input, &frame_len, sizeof(int));\n        parser->enc_frame_len = ntohl (frame_len);\n        parser->remain = parser->enc_frame_len;\n\n        if (parser->version == 1)\n            blocktx_decrypt_init (&parser->ctx, parser->key, parser->iv);\n        else if (parser->version == 2)\n            blocktx_decrypt_init (&parser->ctx, parser->key_v2, parser->iv_v2);\n        parser->enc_init = TRUE;\n\n        if (evbuffer_get_length (input) > 0)\n            return handle_frame_fragment_content (buf, parser);\n\n        return 0;\n    } else {\n        return handle_frame_fragment_content (buf, parser);\n    }\n}\n"
  },
  {
    "path": "common/block-tx-utils.h",
    "content": "#ifndef BLOCK_TX_UTILS_H\n#define BLOCK_TX_UTILS_H\n\n#include <event2/buffer.h>\n#include <event2/util.h>\n#include <openssl/evp.h>\n\n/* Common structures and contants shared by the client and server. */\n\n/* We use AES 256 */\n#define ENC_KEY_SIZE 32\n#define ENC_BLOCK_SIZE 16\n\n#define BLOCK_PROTOCOL_VERSION 2\n\nenum {\n    STATUS_OK = 0,\n    STATUS_VERSION_MISMATCH,\n    STATUS_BAD_REQUEST,\n    STATUS_ACCESS_DENIED,\n    STATUS_INTERNAL_SERVER_ERROR,\n    STATUS_NOT_FOUND,\n};\n\nstruct _HandshakeRequest {\n    gint32 version;\n    gint32 key_len;\n    char enc_session_key[0];\n} __attribute__((__packed__));\n\ntypedef struct _HandshakeRequest HandshakeRequest;\n\nstruct _HandshakeResponse {\n    gint32 status;\n    gint32 version;\n} __attribute__((__packed__));\n\ntypedef struct _HandshakeResponse HandshakeResponse;\n\nstruct _AuthResponse {\n    gint32 status;\n} __attribute__((__packed__));\n\ntypedef struct _AuthResponse AuthResponse;\n\nenum {\n    REQUEST_COMMAND_GET = 0,\n    REQUEST_COMMAND_PUT,\n};\n\nstruct _RequestHeader {\n    gint32 command;\n    char block_id[40];\n} __attribute__((__packed__));\n\ntypedef struct _RequestHeader RequestHeader;\n\nstruct _ResponseHeader {\n    gint32 status;\n} __attribute__((__packed__));\n\ntypedef struct _ResponseHeader ResponseHeader;\n\n/* Utility functions for encryption. 
*/\n\nvoid\nblocktx_generate_encrypt_key (unsigned char *session_key, int sk_len,\n                              unsigned char *key, unsigned char *iv);\n\nint\nblocktx_encrypt_init (EVP_CIPHER_CTX **ctx,\n                      const unsigned char *key,\n                      const unsigned char *iv);\n\nint\nblocktx_decrypt_init (EVP_CIPHER_CTX **ctx,\n                      const unsigned char *key,\n                      const unsigned char *iv);\n\n/*\n * Encrypted data is sent in \"frames\".\n * Format of a frame:\n *\n * length of data in the frame after encryption + encrypted data.\n *\n * Each frame can contain three types of contents:\n * 1. Auth request or response;\n * 2. Block request or response header;\n * 3. Block content.\n */\n\nint\nsend_encrypted_data_frame_begin (evutil_socket_t data_fd,\n                                 int frame_len);\n\nint\nsend_encrypted_data (EVP_CIPHER_CTX *ctx,\n                     evutil_socket_t data_fd,\n                     const void *buf, int len);\n\nint\nsend_encrypted_data_frame_end (EVP_CIPHER_CTX *ctx,\n                               evutil_socket_t data_fd);\n\ntypedef int (*FrameContentCB) (char *, int, void *);\n\ntypedef int (*FrameFragmentCB) (char *, int, int, void *);\n\ntypedef struct _FrameParser {\n    int enc_frame_len;\n\n    unsigned char key[ENC_KEY_SIZE];\n    unsigned char iv[ENC_BLOCK_SIZE];\n    gboolean enc_init;\n    EVP_CIPHER_CTX *ctx;\n\n    unsigned char key_v2[ENC_KEY_SIZE];\n    unsigned char iv_v2[ENC_BLOCK_SIZE];\n\n    int version;\n\n    /* Used when parsing fragments */\n    int remain;\n\n    FrameContentCB content_cb;\n    FrameFragmentCB fragment_cb;\n    void *cbarg;\n} FrameParser;\n\n/* Handle entire frame all at once.\n * parser->content_cb() will be called after the entire frame is read.\n */\nint\nhandle_one_frame (struct evbuffer *buf, FrameParser *parser);\n\n/* Handle a frame fragment by fragment.\n * parser->fragment_cb() will be called when any amount data is 
read.\n */\nint\nhandle_frame_fragments (struct evbuffer *buf, FrameParser *parser);\n\n#endif\n"
  },
  {
    "path": "common/block.h",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#ifndef BLOCK_H\n#define BLOCK_H\n\ntypedef struct _BMetadata BlockMetadata;\ntypedef struct _BMetadata BMetadata;\n\nstruct _BMetadata {\n    char        id[41];\n    uint32_t    size;\n};\n\n/* Opaque block handle.\n */\ntypedef struct _BHandle BlockHandle;\ntypedef struct _BHandle BHandle;\n\nenum {\n    BLOCK_READ,\n    BLOCK_WRITE,\n};\n\ntypedef gboolean (*SeafBlockFunc) (const char *store_id,\n                                   int version,\n                                   const char *block_id,\n                                   void *user_data);\n\n#endif\n"
  },
  {
    "path": "common/branch-mgr.c",
    "content": "#include \"common.h\"\n\n#include \"log.h\"\n\n#ifndef SEAFILE_SERVER\n#include \"db.h\"\n#else\n#include \"seaf-db.h\"\n#endif\n\n#include \"seafile-session.h\"\n\n#ifdef FULL_FEATURE\n#include \"notif-mgr.h\"\n#endif\n\n#include \"branch-mgr.h\"\n\n#define BRANCH_DB \"branch.db\"\n\nSeafBranch *\nseaf_branch_new (const char *name, const char *repo_id, const char *commit_id)\n{\n    SeafBranch *branch;\n\n    branch = g_new0 (SeafBranch, 1);\n\n    branch->name = g_strdup (name);\n    memcpy (branch->repo_id, repo_id, 36);\n    branch->repo_id[36] = '\\0';\n    memcpy (branch->commit_id, commit_id, 40);\n    branch->commit_id[40] = '\\0';\n\n    branch->ref = 1;\n\n    return branch;\n}\n\nvoid\nseaf_branch_free (SeafBranch *branch)\n{\n    if (branch == NULL) return;\n    g_free (branch->name);\n    g_free (branch);\n}\n\nvoid\nseaf_branch_list_free (GList *blist)\n{\n    GList *ptr;\n\n    for (ptr = blist; ptr; ptr = ptr->next) {\n        seaf_branch_unref (ptr->data);\n    }\n    g_list_free (blist);\n}\n\n\nvoid\nseaf_branch_set_commit (SeafBranch *branch, const char *commit_id)\n{\n    memcpy (branch->commit_id, commit_id, 40);\n    branch->commit_id[40] = '\\0';\n}\n\nvoid\nseaf_branch_ref (SeafBranch *branch)\n{\n    branch->ref++;\n}\n\nvoid\nseaf_branch_unref (SeafBranch *branch)\n{\n    if (!branch)\n        return;\n\n    if (--branch->ref <= 0)\n        seaf_branch_free (branch);\n}\n\nstruct _SeafBranchManagerPriv {\n    sqlite3 *db;\n#ifndef SEAFILE_SERVER\n    pthread_mutex_t db_lock;\n#endif\n};\n\nstatic int open_db (SeafBranchManager *mgr);\n\nSeafBranchManager *\nseaf_branch_manager_new (struct _SeafileSession *seaf)\n{\n    SeafBranchManager *mgr;\n\n    mgr = g_new0 (SeafBranchManager, 1);\n    mgr->priv = g_new0 (SeafBranchManagerPriv, 1);\n    mgr->seaf = seaf;\n\n#ifndef SEAFILE_SERVER\n    pthread_mutex_init (&mgr->priv->db_lock, NULL);\n#endif\n\n    return mgr;\n}\n\nint\nseaf_branch_manager_init (SeafBranchManager 
*mgr)\n{\n    return open_db (mgr);\n}\n\nstatic int\nopen_db (SeafBranchManager *mgr)\n{\n    if (!mgr->seaf->create_tables && seaf_db_type (mgr->seaf->db) != SEAF_DB_TYPE_PGSQL)\n        return 0;\n#ifndef SEAFILE_SERVER\n\n    char *db_path;\n    const char *sql;\n\n    db_path = g_build_filename (mgr->seaf->seaf_dir, BRANCH_DB, NULL);\n    if (sqlite_open_db (db_path, &mgr->priv->db) < 0) {\n        g_critical (\"[Branch mgr] Failed to open branch db\\n\");\n        g_free (db_path);\n        return -1;\n    }\n    g_free (db_path);\n\n    sql = \"CREATE TABLE IF NOT EXISTS Branch (\"\n          \"name TEXT, repo_id TEXT, commit_id TEXT);\";\n    if (sqlite_query_exec (mgr->priv->db, sql) < 0)\n        return -1;\n\n    sql = \"CREATE INDEX IF NOT EXISTS branch_index ON Branch(repo_id, name);\";\n    if (sqlite_query_exec (mgr->priv->db, sql) < 0)\n        return -1;\n\n#elif defined FULL_FEATURE\n\n    char *sql;\n    switch (seaf_db_type (mgr->seaf->db)) {\n    case SEAF_DB_TYPE_MYSQL:\n        sql = \"CREATE TABLE IF NOT EXISTS Branch (\"\n            \"id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, \"\n            \"name VARCHAR(10), repo_id CHAR(41), commit_id CHAR(41),\"\n            \"UNIQUE INDEX(repo_id, name)) ENGINE = INNODB\";\n        if (seaf_db_query (mgr->seaf->db, sql) < 0)\n            return -1;\n        break;\n    case SEAF_DB_TYPE_PGSQL:\n        sql = \"CREATE TABLE IF NOT EXISTS Branch (\"\n            \"name VARCHAR(10), repo_id CHAR(40), commit_id CHAR(40),\"\n            \"PRIMARY KEY (repo_id, name))\";\n        if (seaf_db_query (mgr->seaf->db, sql) < 0)\n            return -1;\n        break;\n    case SEAF_DB_TYPE_SQLITE:\n        sql = \"CREATE TABLE IF NOT EXISTS Branch (\"\n            \"name VARCHAR(10), repo_id CHAR(41), commit_id CHAR(41),\"\n            \"PRIMARY KEY (repo_id, name))\";\n        if (seaf_db_query (mgr->seaf->db, sql) < 0)\n            return -1;\n        break;\n    }\n\n#endif\n\n    return 
0;\n}\n\nint\nseaf_branch_manager_add_branch (SeafBranchManager *mgr, SeafBranch *branch)\n{\n#ifndef SEAFILE_SERVER\n    char sql[256];\n\n    pthread_mutex_lock (&mgr->priv->db_lock);\n\n    sqlite3_snprintf (sizeof(sql), sql,\n                      \"SELECT 1 FROM Branch WHERE name=%Q and repo_id=%Q\",\n                      branch->name, branch->repo_id);\n    if (sqlite_check_for_existence (mgr->priv->db, sql))\n        sqlite3_snprintf (sizeof(sql), sql,\n                          \"UPDATE Branch SET commit_id=%Q WHERE \"\n                          \"name=%Q and repo_id=%Q\",\n                          branch->commit_id, branch->name, branch->repo_id);\n    else\n        sqlite3_snprintf (sizeof(sql), sql,\n                          \"INSERT INTO Branch (name, repo_id, commit_id) VALUES (%Q, %Q, %Q)\",\n                          branch->name, branch->repo_id, branch->commit_id);\n\n    sqlite_query_exec (mgr->priv->db, sql);\n\n    pthread_mutex_unlock (&mgr->priv->db_lock);\n\n    return 0;\n#else\n    char *sql;\n    SeafDB *db = mgr->seaf->db;\n\n    if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL) {\n        gboolean exists, err;\n        int rc;\n\n        sql = \"SELECT repo_id FROM Branch WHERE name=? AND repo_id=?\";\n        exists = seaf_db_statement_exists(db, sql, &err,\n                                          2, \"string\", branch->name,\n                                          \"string\", branch->repo_id);\n        if (err)\n            return -1;\n\n        if (exists)\n            rc = seaf_db_statement_query (db,\n                                          \"UPDATE Branch SET commit_id=? \"\n                                          \"WHERE name=? 
AND repo_id=?\",\n                                          3, \"string\", branch->commit_id,\n                                          \"string\", branch->name,\n                                          \"string\", branch->repo_id);\n        else\n            rc = seaf_db_statement_query (db,\n                                          \"INSERT INTO Branch (name, repo_id, commit_id) VALUES (?, ?, ?)\",\n                                          3, \"string\", branch->name,\n                                          \"string\", branch->repo_id,\n                                          \"string\", branch->commit_id);\n        if (rc < 0)\n            return -1;\n    } else {\n        int rc = seaf_db_statement_query (db,\n                                 \"REPLACE INTO Branch (name, repo_id, commit_id) VALUES (?, ?, ?)\",\n                                 3, \"string\", branch->name,\n                                 \"string\", branch->repo_id,\n                                 \"string\", branch->commit_id);\n        if (rc < 0)\n            return -1;\n    }\n    return 0;\n#endif\n}\n\nint\nseaf_branch_manager_del_branch (SeafBranchManager *mgr,\n                                const char *repo_id,\n                                const char *name)\n{\n#ifndef SEAFILE_SERVER\n    char *sql;\n\n    pthread_mutex_lock (&mgr->priv->db_lock);\n\n    sql = sqlite3_mprintf (\"DELETE FROM Branch WHERE name = %Q AND \"\n                           \"repo_id = '%s'\", name, repo_id);\n    if (sqlite_query_exec (mgr->priv->db, sql) < 0)\n        seaf_warning (\"Delete branch %s failed\\n\", name);\n    sqlite3_free (sql);\n\n    pthread_mutex_unlock (&mgr->priv->db_lock);\n\n    return 0;\n#else\n    int rc = seaf_db_statement_query (mgr->seaf->db,\n                                      \"DELETE FROM Branch WHERE name=? 
AND repo_id=?\",\n                                      2, \"string\", name, \"string\", repo_id);\n    if (rc < 0)\n        return -1;\n    return 0;\n#endif\n}\n\nint\nseaf_branch_manager_update_branch (SeafBranchManager *mgr, SeafBranch *branch)\n{\n#ifndef SEAFILE_SERVER\n    sqlite3 *db;\n    char *sql;\n\n    pthread_mutex_lock (&mgr->priv->db_lock);\n\n    db = mgr->priv->db;\n    sql = sqlite3_mprintf (\"UPDATE Branch SET commit_id = %Q \"\n                           \"WHERE name = %Q AND repo_id = %Q\",\n                           branch->commit_id, branch->name, branch->repo_id);\n    sqlite_query_exec (db, sql);\n    sqlite3_free (sql);\n\n    pthread_mutex_unlock (&mgr->priv->db_lock);\n\n    return 0;\n#else\n    int rc = seaf_db_statement_query (mgr->seaf->db,\n                                      \"UPDATE Branch SET commit_id = ? \"\n                                      \"WHERE name = ? AND repo_id = ?\",\n                                      3, \"string\", branch->commit_id,\n                                      \"string\", branch->name,\n                                      \"string\", branch->repo_id);\n    if (rc < 0)\n        return -1;\n    return 0;\n#endif\n}\n\n#if defined( SEAFILE_SERVER ) && defined( FULL_FEATURE )\n\n#include \"mq-mgr.h\"\n\nstatic gboolean\nget_commit_id (SeafDBRow *row, void *data)\n{\n    char *out_commit_id = data;\n    const char *commit_id;\n\n    commit_id = seaf_db_row_get_column_text (row, 0);\n    memcpy (out_commit_id, commit_id, 41);\n    out_commit_id[40] = '\\0';\n\n    return FALSE;\n}\n\nstatic void\npublish_repo_update_event (const char *repo_id, const char *commit_id)\n{\n    json_t *msg = json_object ();\n    char *msg_str = NULL;\n\n    json_object_set_new (msg, \"msg_type\", json_string(\"repo-update\"));\n    json_object_set_new (msg, \"repo_id\", json_string(repo_id));\n    json_object_set_new (msg, \"commit_id\", json_string(commit_id));\n\n    msg_str = json_dumps (msg, 
JSON_PRESERVE_ORDER);\n\n    seaf_mq_manager_publish_event (seaf->mq_mgr, SEAFILE_SERVER_CHANNEL_EVENT, msg_str);\n    g_free (msg_str);\n    json_decref (msg);\n}\n\nstatic void\nnotify_repo_update (const char *repo_id, const char *commit_id)\n{\n    json_t *event = NULL;\n    json_t *content = NULL;\n    char *msg = NULL;\n\n    event = json_object ();\n    content = json_object ();\n\n    json_object_set_new (event, \"type\", json_string(\"repo-update\"));\n\n    json_object_set_new (content, \"repo_id\", json_string(repo_id));\n    json_object_set_new (content, \"commit_id\", json_string(commit_id));\n\n    json_object_set_new (event, \"content\", content);\n\n    msg = json_dumps (event, JSON_COMPACT);\n\n    if (seaf->notif_mgr)\n        seaf_notif_manager_send_event (seaf->notif_mgr, msg);\n\n    json_decref (event);\n    g_free (msg);\n}\n\nstatic void\non_branch_updated (SeafBranchManager *mgr, SeafBranch *branch)\n{\n    if (seaf->is_repair)\n        return;\n    seaf_repo_manager_update_repo_info (seaf->repo_mgr, branch->repo_id, branch->commit_id);\n\n    notify_repo_update(branch->repo_id, branch->commit_id);\n\n    if (seaf_repo_manager_is_virtual_repo (seaf->repo_mgr, branch->repo_id))\n        return;\n\n    publish_repo_update_event (branch->repo_id, branch->commit_id);\n}\n\nstatic gboolean\nget_gc_id (SeafDBRow *row, void *data)\n{\n    char **out_gc_id = data;\n\n    *out_gc_id = g_strdup(seaf_db_row_get_column_text (row, 0));\n\n    return FALSE;\n}\n\nint\nseaf_branch_manager_test_and_update_branch (SeafBranchManager *mgr,\n                                            SeafBranch *branch,\n                                            const char *old_commit_id,\n                                            gboolean check_gc,\n                                            const char *last_gc_id,\n                                            const char *origin_repo_id,\n                                            gboolean *gc_conflict)\n{\n    
SeafDBTrans *trans;\n    char *sql;\n    char commit_id[41] = { 0 };\n    char *gc_id = NULL;\n\n    if (check_gc)\n        *gc_conflict = FALSE;\n\n    trans = seaf_db_begin_transaction (mgr->seaf->db);\n    if (!trans)\n        return -1;\n\n    if (check_gc) {\n        sql = \"SELECT gc_id FROM GCID WHERE repo_id = ? FOR UPDATE\";\n        if (!origin_repo_id) {\n            if (seaf_db_trans_foreach_selected_row (trans, sql,\n                                                    get_gc_id, &gc_id,\n                                                    1, \"string\", branch->repo_id) < 0) {\n                seaf_db_rollback (trans);\n                seaf_db_trans_close (trans);\n                return -1;\n            }\n        }\n        else {\n            if (seaf_db_trans_foreach_selected_row (trans, sql,\n                                                    get_gc_id, &gc_id,\n                                                    1, \"string\", origin_repo_id) < 0) {\n                seaf_db_rollback (trans);\n                seaf_db_trans_close (trans);\n                return -1;\n            }\n        }\n\n        if (g_strcmp0 (last_gc_id, gc_id) != 0) {\n            seaf_warning (\"Head branch update for repo %s conflicts with GC.\\n\",\n                          branch->repo_id);\n            seaf_db_rollback (trans);\n            seaf_db_trans_close (trans);\n            *gc_conflict = TRUE;\n            g_free (gc_id);\n            return -1;\n        }\n        g_free (gc_id);\n    }\n\n    switch (seaf_db_type (mgr->seaf->db)) {\n    case SEAF_DB_TYPE_MYSQL:\n    case SEAF_DB_TYPE_PGSQL:\n        sql = \"SELECT commit_id FROM Branch WHERE name=? \"\n            \"AND repo_id=? FOR UPDATE\";\n        break;\n    case SEAF_DB_TYPE_SQLITE:\n        sql = \"SELECT commit_id FROM Branch WHERE name=? 
\"\n            \"AND repo_id=?\";\n        break;\n    default:\n        g_return_val_if_reached (-1);\n    }\n    if (seaf_db_trans_foreach_selected_row (trans, sql,\n                                            get_commit_id, commit_id,\n                                            2, \"string\", branch->name,\n                                            \"string\", branch->repo_id) < 0) {\n        seaf_db_rollback (trans);\n        seaf_db_trans_close (trans);\n        return -1;\n    }\n    if (strcmp (old_commit_id, commit_id) != 0) {\n        seaf_db_rollback (trans);\n        seaf_db_trans_close (trans);\n        return -1;\n    }\n\n    sql = \"UPDATE Branch SET commit_id = ? \"\n        \"WHERE name = ? AND repo_id = ?\";\n    if (seaf_db_trans_query (trans, sql, 3, \"string\", branch->commit_id,\n                             \"string\", branch->name,\n                             \"string\", branch->repo_id) < 0) {\n        seaf_db_rollback (trans);\n        seaf_db_trans_close (trans);\n        return -1;\n    }\n\n    if (seaf_db_commit (trans) < 0) {\n        seaf_db_rollback (trans);\n        seaf_db_trans_close (trans);\n        return -1;\n    }\n\n    seaf_db_trans_close (trans);\n\n    on_branch_updated (mgr, branch);\n\n    return 0;\n}\n\n#endif\n\n#ifndef SEAFILE_SERVER\nstatic SeafBranch *\nreal_get_branch (SeafBranchManager *mgr,\n                 const char *repo_id,\n                 const char *name)\n{\n    SeafBranch *branch = NULL;\n    sqlite3_stmt *stmt;\n    sqlite3 *db;\n    char *sql;\n    int result;\n\n    pthread_mutex_lock (&mgr->priv->db_lock);\n\n    db = mgr->priv->db;\n    sql = sqlite3_mprintf (\"SELECT commit_id FROM Branch \"\n                           \"WHERE name = %Q and repo_id='%s'\",\n                           name, repo_id);\n    if (!(stmt = sqlite_query_prepare (db, sql))) {\n        seaf_warning (\"[Branch mgr] Couldn't prepare query %s\\n\", sql);\n        sqlite3_free (sql);\n        pthread_mutex_unlock 
(&mgr->priv->db_lock);\n        return NULL;\n    }\n    sqlite3_free (sql);\n\n    result = sqlite3_step (stmt);\n    if (result == SQLITE_ROW) {\n        char *commit_id = (char *)sqlite3_column_text (stmt, 0);\n\n        branch = seaf_branch_new (name, repo_id, commit_id);\n        pthread_mutex_unlock (&mgr->priv->db_lock);\n        sqlite3_finalize (stmt);\n        return branch;\n    } else if (result == SQLITE_ERROR) {\n        const char *str = sqlite3_errmsg (db);\n        seaf_warning (\"Couldn't prepare query, error: %d->'%s'\\n\",\n                   result, str ? str : \"no error given\");\n    }\n\n    sqlite3_finalize (stmt);\n    pthread_mutex_unlock (&mgr->priv->db_lock);\n    return NULL;\n}\n\nSeafBranch *\nseaf_branch_manager_get_branch (SeafBranchManager *mgr,\n                                const char *repo_id,\n                                const char *name)\n{\n    SeafBranch *branch;\n\n    /* \"fetch_head\" maps to \"local\" or \"master\" on client (LAN sync) */\n    if (strcmp (name, \"fetch_head\") == 0) {\n        branch = real_get_branch (mgr, repo_id, \"local\");\n        if (!branch) {\n            branch = real_get_branch (mgr, repo_id, \"master\");\n        }\n        return branch;\n    } else {\n        return real_get_branch (mgr, repo_id, name);\n    }\n}\n\n#else\n\nstatic gboolean\nget_branch (SeafDBRow *row, void *vid)\n{\n    char *ret = vid;\n    const char *commit_id;\n\n    commit_id = seaf_db_row_get_column_text (row, 0);\n    memcpy (ret, commit_id, 41);\n\n    return FALSE;\n}\n\nstatic SeafBranch *\nreal_get_branch (SeafBranchManager *mgr,\n                 const char *repo_id,\n                 const char *name)\n{\n    char commit_id[41];\n    char *sql;\n\n    commit_id[0] = 0;\n    sql = \"SELECT commit_id FROM Branch WHERE name=? 
AND repo_id=?\";\n    if (seaf_db_statement_foreach_row (mgr->seaf->db, sql, \n                                       get_branch, commit_id,\n                                       2, \"string\", name, \"string\", repo_id) < 0) {\n        seaf_warning (\"[branch mgr] DB error when get branch %s.\\n\", name);\n        return NULL;\n    }\n\n    if (commit_id[0] == 0)\n        return NULL;\n\n    return seaf_branch_new (name, repo_id, commit_id);\n}\n\nSeafBranch *\nseaf_branch_manager_get_branch (SeafBranchManager *mgr,\n                                const char *repo_id,\n                                const char *name)\n{\n    SeafBranch *branch;\n\n    /* \"fetch_head\" maps to \"master\" on server. */\n    if (strcmp (name, \"fetch_head\") == 0) {\n        branch = real_get_branch (mgr, repo_id, \"master\");\n        return branch;\n    } else {\n        return real_get_branch (mgr, repo_id, name);\n    }\n}\n\n#endif  /* not SEAFILE_SERVER */\n\ngboolean\nseaf_branch_manager_branch_exists (SeafBranchManager *mgr,\n                                   const char *repo_id,\n                                   const char *name)\n{\n#ifndef SEAFILE_SERVER\n    char *sql;\n    gboolean ret;\n\n    pthread_mutex_lock (&mgr->priv->db_lock);\n\n    sql = sqlite3_mprintf (\"SELECT name FROM Branch WHERE name = %Q \"\n                           \"AND repo_id='%s'\", name, repo_id);\n    ret = sqlite_check_for_existence (mgr->priv->db, sql);\n    sqlite3_free (sql);\n\n    pthread_mutex_unlock (&mgr->priv->db_lock);\n    return ret;\n#else\n    gboolean db_err = FALSE;\n\n    return seaf_db_statement_exists (mgr->seaf->db,\n                                     \"SELECT name FROM Branch WHERE name=? 
\"\n                                     \"AND repo_id=?\", &db_err,\n                                     2, \"string\", name, \"string\", repo_id);\n#endif\n}\n\n#ifndef SEAFILE_SERVER\nGList *\nseaf_branch_manager_get_branch_list (SeafBranchManager *mgr,\n                                     const char *repo_id)\n{\n    sqlite3 *db = mgr->priv->db;\n    \n    int result;\n    sqlite3_stmt *stmt;\n    char sql[256];\n    char *name;\n    char *commit_id;\n    GList *ret = NULL;\n    SeafBranch *branch;\n\n    snprintf (sql, 256, \"SELECT name, commit_id FROM branch WHERE repo_id ='%s'\",\n              repo_id);\n\n    pthread_mutex_lock (&mgr->priv->db_lock);\n\n    if ( !(stmt = sqlite_query_prepare(db, sql)) ) {\n        pthread_mutex_unlock (&mgr->priv->db_lock);\n        return NULL;\n    }\n\n    while (1) {\n        result = sqlite3_step (stmt);\n        if (result == SQLITE_ROW) {\n            name = (char *)sqlite3_column_text(stmt, 0);\n            commit_id = (char *)sqlite3_column_text(stmt, 1);\n            branch = seaf_branch_new (name, repo_id, commit_id);\n            ret = g_list_prepend (ret, branch);\n        }\n        if (result == SQLITE_DONE)\n            break;\n        if (result == SQLITE_ERROR) {\n            const gchar *str = sqlite3_errmsg (db);\n            seaf_warning (\"Couldn't prepare query, error: %d->'%s'\\n\", \n                       result, str ? 
str : \"no error given\");\n            sqlite3_finalize (stmt);\n            seaf_branch_list_free (ret);\n            pthread_mutex_unlock (&mgr->priv->db_lock);\n            return NULL;\n        }\n    }\n\n    sqlite3_finalize (stmt);\n    pthread_mutex_unlock (&mgr->priv->db_lock);\n    return g_list_reverse(ret);\n}\n#else\nstatic gboolean\nget_branches (SeafDBRow *row, void *vplist)\n{\n    GList **plist = vplist;\n    const char *commit_id;\n    const char *name;\n    const char *repo_id;\n    SeafBranch *branch;\n\n    name = seaf_db_row_get_column_text (row, 0);\n    repo_id = seaf_db_row_get_column_text (row, 1);\n    commit_id = seaf_db_row_get_column_text (row, 2);\n\n    branch = seaf_branch_new (name, repo_id, commit_id);\n    *plist = g_list_prepend (*plist, branch);\n\n    return TRUE;\n}\n\nGList *\nseaf_branch_manager_get_branch_list (SeafBranchManager *mgr,\n                                     const char *repo_id)\n{\n    GList *ret = NULL;\n    char *sql;\n\n    sql = \"SELECT name, repo_id, commit_id FROM Branch WHERE repo_id=?\";\n    if (seaf_db_statement_foreach_row (mgr->seaf->db, sql, \n                                       get_branches, &ret,\n                                       1, \"string\", repo_id) < 0) {\n        seaf_warning (\"[branch mgr] DB error when get branch list.\\n\");\n        return NULL;\n    }\n\n    return ret;\n}\n#endif\n"
  },
  {
    "path": "common/branch-mgr.h",
    "content": "#ifndef SEAF_BRANCH_MGR_H\n#define SEAF_BRANCH_MGR_H\n\n#include \"commit-mgr.h\"\n#define NO_BRANCH \"-\"\n\ntypedef struct _SeafBranch SeafBranch;\n\nstruct _SeafBranch {\n    int   ref;\n    char *name;\n    char  repo_id[37];\n    char  commit_id[41];\n};\n\nSeafBranch *seaf_branch_new (const char *name,\n                             const char *repo_id,\n                             const char *commit_id);\nvoid seaf_branch_free (SeafBranch *branch);\nvoid seaf_branch_set_commit (SeafBranch *branch, const char *commit_id);\n\nvoid seaf_branch_ref (SeafBranch *branch);\nvoid seaf_branch_unref (SeafBranch *branch);\n\n\ntypedef struct _SeafBranchManager SeafBranchManager;\ntypedef struct _SeafBranchManagerPriv SeafBranchManagerPriv;\n\nstruct _SeafileSession;\nstruct _SeafBranchManager {\n    struct _SeafileSession *seaf;\n\n    SeafBranchManagerPriv *priv;\n};\n\nSeafBranchManager *seaf_branch_manager_new (struct _SeafileSession *seaf);\nint seaf_branch_manager_init (SeafBranchManager *mgr);\n\nint\nseaf_branch_manager_add_branch (SeafBranchManager *mgr, SeafBranch *branch);\n\nint\nseaf_branch_manager_del_branch (SeafBranchManager *mgr,\n                                const char *repo_id,\n                                const char *name);\n\nvoid\nseaf_branch_list_free (GList *blist);\n\nint\nseaf_branch_manager_update_branch (SeafBranchManager *mgr,\n                                   SeafBranch *branch);\n\n#ifdef SEAFILE_SERVER\n/**\n * Atomically test whether the current head commit id on @branch\n * is the same as @old_commit_id and update branch in db.\n */\nint\nseaf_branch_manager_test_and_update_branch (SeafBranchManager *mgr,\n                                            SeafBranch *branch,\n                                            const char *old_commit_id,\n                                            gboolean check_gc,\n                                            const char *last_gc_id,\n                                          
  const char *origin_repo_id,\n                                            gboolean *gc_conflict);\n#endif\n\nSeafBranch *\nseaf_branch_manager_get_branch (SeafBranchManager *mgr,\n                                const char *repo_id,\n                                const char *name);\n\n\ngboolean\nseaf_branch_manager_branch_exists (SeafBranchManager *mgr,\n                                   const char *repo_id,\n                                   const char *name);\n\nGList *\nseaf_branch_manager_get_branch_list (SeafBranchManager *mgr,\n                                     const char *repo_id);\n\ngint64\nseaf_branch_manager_calculate_branch_size (SeafBranchManager *mgr,\n                                           const char *repo_id, \n                                           const char *commit_id);\n#endif /* SEAF_BRANCH_MGR_H */\n"
  },
  {
    "path": "common/cdc/Makefile.am",
    "content": "AM_CFLAGS = -I$(top_srcdir)/common -I$(top_srcdir)/lib \\\n\t-Wall @GLIB2_CFLAGS@ @MSVC_CFLAGS@\n\nnoinst_LTLIBRARIES = libcdc.la\n\nnoinst_HEADERS = cdc.h rabin-checksum.h\n\nlibcdc_la_SOURCES = cdc.c rabin-checksum.c\n\nlibcdc_la_LDFLAGS = -Wl,-z -Wl,defs\nlibcdc_la_LIBADD = @SSL_LIBS@ @GLIB2_LIBS@ \\\n\t$(top_builddir)/lib/libseafile_common.la\n"
  },
  {
    "path": "common/cdc/cdc.c",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#include \"common.h\"\n\n#include \"log.h\"\n\n#include <unistd.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <fcntl.h>\n#include <string.h>\n#include <sys/stat.h>\n#include <errno.h>\n#include <glib/gstdio.h>\n\n#include \"utils.h\"\n\n#include \"cdc.h\"\n#include \"../seafile-crypt.h\"\n\n#include \"rabin-checksum.h\"\n#define finger rabin_checksum\n#define rolling_finger rabin_rolling_checksum\n\n#define BLOCK_SZ        (1024*1024*1)\n#define BLOCK_MIN_SZ    (1024*256)\n#define BLOCK_MAX_SZ    (1024*1024*4)\n#define BLOCK_WIN_SZ    48\n\n#define NAME_MAX_SZ     4096\n\n#define BREAK_VALUE     0x0013    ///0x0513\n\n#define READ_SIZE 1024 * 4\n\n#define BYTE_TO_HEX(b)  (((b)>=10)?('a'+b-10):('0'+b))\n\nstatic int default_write_chunk (CDCDescriptor *chunk_descr)\n{\n    char filename[NAME_MAX_SZ];\n    char chksum_str[CHECKSUM_LENGTH *2 + 1];\n    int fd_chunk, ret;\n\n    memset(chksum_str, 0, sizeof(chksum_str));\n    rawdata_to_hex (chunk_descr->checksum, chksum_str, CHECKSUM_LENGTH);\n    snprintf (filename, NAME_MAX_SZ, \"./%s\", chksum_str);\n    fd_chunk = g_open (filename, O_RDWR | O_CREAT | O_BINARY, 0644);\n    if (fd_chunk < 0)\n        return -1;    \n    \n    ret = writen (fd_chunk, chunk_descr->block_buf, chunk_descr->len);\n    close (fd_chunk);\n    return ret;\n}\n\nstatic int init_cdc_file_descriptor (int fd,\n                                     uint64_t file_size,\n                                     CDCFileDescriptor *file_descr)\n{\n    int max_block_nr = 0;\n    int block_min_sz = 0;\n\n    file_descr->block_nr = 0;\n\n    if (file_descr->block_min_sz <= 0)\n        file_descr->block_min_sz = BLOCK_MIN_SZ;\n    if (file_descr->block_max_sz <= 0)\n        file_descr->block_max_sz = BLOCK_MAX_SZ;\n    if (file_descr->block_sz <= 0)\n        file_descr->block_sz = BLOCK_SZ;\n\n    if (file_descr->write_block == NULL)\n        
file_descr->write_block = (WriteblockFunc)default_write_chunk;\n\n    block_min_sz = file_descr->block_min_sz;\n    max_block_nr = ((file_size + block_min_sz - 1) / block_min_sz);\n    file_descr->blk_sha1s = (uint8_t *)calloc (sizeof(uint8_t),\n                                               max_block_nr * CHECKSUM_LENGTH);\n    file_descr->max_block_nr = max_block_nr;\n\n    return 0;\n}\n\n#define WRITE_CDC_BLOCK(block_sz, write_data)                \\\ndo {                                                         \\\n    int _block_sz = (block_sz);                              \\\n    chunk_descr.len = _block_sz;                             \\\n    chunk_descr.offset = offset;                             \\\n    ret = file_descr->write_block (file_descr->repo_id,      \\\n                                   file_descr->version,      \\\n                                   &chunk_descr,             \\\n            crypt, chunk_descr.checksum,                     \\\n                                   (write_data));            \\\n    if (ret < 0) {                                           \\\n        free (buf);                                          \\\n        g_warning (\"CDC: failed to write chunk.\\n\");         \\\n        return -1;                                           \\\n    }                                                        \\\n    memcpy (file_descr->blk_sha1s +                          \\\n            file_descr->block_nr * CHECKSUM_LENGTH,          \\\n            chunk_descr.checksum, CHECKSUM_LENGTH);          \\\n    SHA1_Update (&file_ctx, chunk_descr.checksum, 20);       \\\n    file_descr->block_nr++;                                  \\\n    offset += _block_sz;                                     \\\n                                                             \\\n    memmove (buf, buf + _block_sz, tail - _block_sz);        \\\n    tail = tail - _block_sz;                                 \\\n    cur = 0;                              
                   \\\n}while(0);\n\n/* content-defined chunking */\nint file_chunk_cdc(int fd_src,\n                   CDCFileDescriptor *file_descr,\n                   SeafileCrypt *crypt,\n                   gboolean write_data,\n                   gint64 *indexed)\n{\n    char *buf;\n    uint32_t buf_sz;\n    SHA_CTX file_ctx;\n    CDCDescriptor chunk_descr;\n    SHA1_Init (&file_ctx);\n\n    SeafStat sb;\n    if (seaf_fstat (fd_src, &sb) < 0) {\n        seaf_warning (\"CDC: failed to stat: %s.\\n\", strerror(errno));\n        return -1;\n    }\n    uint64_t expected_size = sb.st_size;\n\n    init_cdc_file_descriptor (fd_src, expected_size, file_descr);\n    uint32_t block_min_sz = file_descr->block_min_sz;\n    uint32_t block_mask = file_descr->block_sz - 1;\n\n    int fingerprint = 0;\n    int offset = 0;\n    int ret = 0;\n    int tail, cur, rsize;\n\n    buf_sz = file_descr->block_max_sz;\n    buf = chunk_descr.block_buf = malloc (buf_sz);\n    if (!buf)\n        return -1;\n\n    /* buf: a fix-sized buffer.\n     * cur: data behind (inclusive) this offset has been scanned.\n     *      cur + 1 is the bytes that has been scanned.\n     * tail: length of data loaded into memory. buf[tail] is invalid.\n     */\n    tail = cur = 0;\n    while (1) {\n        if (tail < block_min_sz) {\n            rsize = block_min_sz - tail + READ_SIZE;\n        } else {\n            rsize = (buf_sz - tail < READ_SIZE) ? (buf_sz - tail) : READ_SIZE;\n        }\n        ret = readn (fd_src, buf + tail, rsize);\n        if (ret < 0) {\n            seaf_warning (\"CDC: failed to read: %s.\\n\", strerror(errno));\n            free (buf);\n            return -1;\n        }\n        tail += ret;\n        file_descr->file_size += ret;\n\n        if (file_descr->file_size > expected_size) {\n            seaf_warning (\"File size changed while chunking.\\n\");\n            free (buf);\n            return -1;\n        }\n\n        /* We've read all the data in this file. 
Output the block immediately\n         * in two cases:\n         * 1. The data left in the file is less than block_min_sz;\n         * 2. We cannot find the break value until the end of this file.\n         */\n        if (tail < block_min_sz || cur >= tail) {\n            if (tail > 0) {\n                if (file_descr->block_nr == file_descr->max_block_nr) {\n                    seaf_warning (\"Block id array is not large enough, bail out.\\n\");\n                    free (buf);\n                    return -1;\n                }\n                gint64 idx_size = tail;\n                WRITE_CDC_BLOCK (tail, write_data);\n                if (indexed)\n                    *indexed += idx_size;\n            }\n            break;\n        }\n\n        /* \n         * A block is at least of size block_min_sz.\n         */\n        if (cur < block_min_sz - 1)\n            cur = block_min_sz - 1;\n\n        while (cur < tail) {\n            fingerprint = (cur == block_min_sz - 1) ?\n                finger(buf + cur - BLOCK_WIN_SZ + 1, BLOCK_WIN_SZ) :\n                rolling_finger (fingerprint, BLOCK_WIN_SZ, \n                                *(buf+cur-BLOCK_WIN_SZ), *(buf + cur));\n\n            /* get a chunk, write block info to chunk file */\n            if (((fingerprint & block_mask) ==  ((BREAK_VALUE & block_mask)))\n                || cur + 1 >= file_descr->block_max_sz)\n            {\n                if (file_descr->block_nr == file_descr->max_block_nr) {\n                    seaf_warning (\"Block id array is not large enough, bail out.\\n\");\n                    free (buf);\n                    return -1;\n                }\n                gint64 idx_size = cur + 1;\n                WRITE_CDC_BLOCK (cur + 1, write_data);\n                if (indexed)\n                    *indexed += idx_size;\n                break;\n            } else {\n                cur ++;\n            }\n        }\n    }\n\n    SHA1_Final (file_descr->file_sum, &file_ctx);\n\n    
free (buf);\n\n    return 0;\n}\n\nint filename_chunk_cdc(const char *filename,\n                       CDCFileDescriptor *file_descr,\n                       SeafileCrypt *crypt,\n                       gboolean write_data,\n                       gint64 *indexed)\n{\n    int fd_src = seaf_util_open (filename, O_RDONLY | O_BINARY);\n    if (fd_src < 0) {\n        seaf_warning (\"CDC: failed to open %s.\\n\", filename);\n        return -1;\n    }\n\n    int ret = file_chunk_cdc (fd_src, file_descr, crypt, write_data, indexed);\n    close (fd_src);\n    return ret;\n}\n\nvoid cdc_init ()\n{\n    rabin_init (BLOCK_WIN_SZ);\n}\n"
  },
  {
    "path": "common/cdc/cdc.h",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#ifndef _CDC_H\n#define _CDC_H\n\n#include <glib.h>\n#include <stdint.h>\n\n#ifdef HAVE_MD5\n#include \"md5.h\"\n#define get_checksum md5\n#define CHECKSUM_LENGTH 16\n#else\n#include <openssl/sha.h>\n#define get_checksum sha1\n#define CHECKSUM_LENGTH 20\n#endif\n\n#ifndef O_BINARY\n#define O_BINARY 0\n#endif\n\nstruct _CDCFileDescriptor;\nstruct _CDCDescriptor;\nstruct SeafileCrypt;\n\ntypedef int (*WriteblockFunc)(const char *repo_id,\n                              int version,\n                              struct _CDCDescriptor *chunk_descr,\n                              struct SeafileCrypt *crypt,\n                              uint8_t *checksum,\n                              gboolean write_data);\n\n/* define chunk file header and block entry */\ntypedef struct _CDCFileDescriptor {\n    uint32_t block_min_sz;\n    uint32_t block_max_sz;\n    uint32_t block_sz;\n    uint64_t file_size;\n\n    uint32_t block_nr;\n    uint8_t *blk_sha1s;\n    int max_block_nr;\n    uint8_t  file_sum[CHECKSUM_LENGTH];\n\n    WriteblockFunc write_block;\n\n    char repo_id[37];\n    int version;\n} CDCFileDescriptor;\n\ntypedef struct _CDCDescriptor {\n    uint64_t offset;\n    uint32_t len;\n    uint8_t  checksum[CHECKSUM_LENGTH];\n    char    *block_buf;\n    int result;\n} CDCDescriptor;\n\nint file_chunk_cdc(int fd_src,\n                   CDCFileDescriptor *file_descr,\n                   struct SeafileCrypt *crypt,\n                   gboolean write_data,\n                   gint64 *indexed);\n\nint filename_chunk_cdc(const char *filename,\n                       CDCFileDescriptor *file_descr,\n                       struct SeafileCrypt *crypt,\n                       gboolean write_data,\n                       gint64 *indexed);\n\nvoid cdc_init ();\n\n#endif\n"
  },
  {
    "path": "common/cdc/rabin-checksum.c",
    "content": "#include <sys/types.h>\n#include \"rabin-checksum.h\"\n\n#ifdef WIN32\n#include <stdint.h>\n#ifndef u_int\ntypedef unsigned int u_int;\n#endif\n\n#ifndef u_char\ntypedef unsigned char u_char;\n#endif\n\n#ifndef u_short\ntypedef unsigned short u_short;\n#endif\n\n#ifndef u_long\ntypedef unsigned long u_long;\n#endif\n\n#ifndef u_int16_t\ntypedef uint16_t u_int16_t;\n#endif\n\n#ifndef u_int32_t\ntypedef uint32_t u_int32_t;\n#endif\n\n#ifndef u_int64_t\ntypedef uint64_t u_int64_t;\n#endif\n#endif\n\n#define INT64(n) n##LL\n#define MSB64 INT64(0x8000000000000000)\n\nstatic u_int64_t poly = 0xbfe6b8a5bf378d83LL;\nstatic u_int64_t T[256];\nstatic u_int64_t U[256];\nstatic int shift;\n\n/* Highest bit set in a byte */\nstatic const char bytemsb[0x100] = {\n  0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5,\n  5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,\n  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7,\n  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,\n  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,\n  7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,\n  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,\n  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,\n  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,\n  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,\n  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,\n};\n\n/* Find last set (most significant bit) */\nstatic inline u_int fls32 (u_int32_t v)\n{\n    if (v & 0xffff0000) {\n        if (v & 0xff000000)\n            return 24 + bytemsb[v>>24];\n        else\n            return 16 + bytemsb[v>>16];\n    }\n    if (v & 0x0000ff00)\n        return 8 + bytemsb[v>>8];\n    else\n        return bytemsb[v];\n}\n\nstatic inline char fls64 (u_int64_t v)\n{\n    u_int32_t h;\n    
if ((h = v >> 32))\n        return 32 + fls32 (h);\n    else\n        return fls32 ((u_int32_t) v);\n}\n\nu_int64_t polymod (u_int64_t nh, u_int64_t nl, u_int64_t d)\n{\n    int i = 0;\n    int k = fls64 (d) - 1;\n\n    d <<= 63 - k;\n\n    if (nh) {\n        if (nh & MSB64)\n            nh ^= d;\n        for (i = 62; i >= 0; i--)\n            if (nh & ((u_int64_t) 1) << i) {\n                nh ^= d >> (63 - i);\n                nl ^= d << (i + 1);\n            }\n    }\n    for (i = 63; i >= k; i--)\n    {  \n        if (nl & INT64 (1) << i)\n            nl ^= d >> (63 - i);\n    }\n  \n    return nl;\n}\n\nvoid polymult (u_int64_t *php, u_int64_t *plp, u_int64_t x, u_int64_t y)\n{\n    int i;\n    u_int64_t ph = 0, pl = 0;\n    if (x & 1)\n        pl = y;\n    for (i = 1; i < 64; i++)\n        if (x & (INT64 (1) << i)) {\n            ph ^= y >> (64 - i);\n            pl ^= y << i;\n        }\n    if (php)\n        *php = ph;\n    if (plp)\n        *plp = pl;\n}\n\nu_int64_t polymmult (u_int64_t x, u_int64_t y, u_int64_t d)\n{\n    u_int64_t h, l;\n    polymult (&h, &l, x, y);\n    return polymod (h, l, d);\n}\n\nstatic u_int64_t append8 (u_int64_t p, u_char m)\n{\n    return ((p << 8) | m) ^ T[p >> shift];\n}\n\nstatic void calcT (u_int64_t poly)\n{\n    int j = 0;\n    int xshift = fls64 (poly) - 1;\n    shift = xshift - 8;\n    u_int64_t T1 = polymod (0, INT64 (1) << xshift, poly);\n    for (j = 0; j < 256; j++) {\n        T[j] = polymmult (j, T1, poly) | ((u_int64_t) j << xshift);\n    }\n}\n\nstatic void calcU(int size)\n{\n    int i;\n    u_int64_t sizeshift = 1;\n    for (i = 1; i < size; i++)\n        sizeshift = append8 (sizeshift, 0);\n    for (i = 0; i < 256; i++)\n        U[i] = polymmult (i, sizeshift, poly);\n}\n\nvoid rabin_init(int len)\n{\n    calcT(poly);\n    calcU(len);\n}\n\n/*\n *   a simple 32 bit checksum that can be upadted from end\n */\nunsigned int rabin_checksum(char *buf, int len)\n{\n    int i;\n    unsigned int sum = 0;\n    for (i 
= 0; i < len; ++i) {\n        sum = rabin_rolling_checksum (sum, len, 0, buf[i]);\n    }\n    return sum;\n}\n\nunsigned int rabin_rolling_checksum(unsigned int csum, int len,\n                                    char c1, char c2)\n{\n    return append8(csum ^ U[(unsigned char)c1], c2);\n}\n"
  },
  {
    "path": "common/cdc/rabin-checksum.h",
    "content": "#ifndef _RABIN_CHECKSUM_H\n#define _RABIN_CHECKSUM_H\n\nunsigned int rabin_checksum(char *buf, int len);\n\nunsigned int rabin_rolling_checksum(unsigned int csum, int len, char c1, char c2);\n\nvoid rabin_init (int len);\n\n#endif\n"
  },
  {
    "path": "common/commit-mgr.c",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#include \"common.h\"\n\n#include \"log.h\"\n\n#include <jansson.h>\n#include <openssl/sha.h>\n\n#include \"utils.h\"\n#include \"db.h\"\n#include \"searpc-utils.h\"\n\n#include \"seafile-session.h\"\n#include \"commit-mgr.h\"\n#include \"seaf-utils.h\"\n\n#define MAX_TIME_SKEW 259200    /* 3 days */\n\nstruct _SeafCommitManagerPriv {\n    int dummy;\n};\n\nstatic SeafCommit *\nload_commit (SeafCommitManager *mgr,\n             const char *repo_id, int version,\n             const char *commit_id);\nstatic int\nsave_commit (SeafCommitManager *manager,\n             const char *repo_id, int version,\n             SeafCommit *commit);\nstatic void\ndelete_commit (SeafCommitManager *mgr,\n               const char *repo_id, int version,\n               const char *id);\nstatic json_t *\ncommit_to_json_object (SeafCommit *commit);\nstatic SeafCommit *\ncommit_from_json_object (const char *id, json_t *object);\n\nstatic void compute_commit_id (SeafCommit* commit)\n{\n    SHA_CTX ctx;\n    uint8_t sha1[20];    \n    gint64 ctime_n;\n\n    SHA1_Init (&ctx);\n    SHA1_Update (&ctx, commit->root_id, 41);\n    SHA1_Update (&ctx, commit->creator_id, 41);\n    if (commit->creator_name)\n        SHA1_Update (&ctx, commit->creator_name, strlen(commit->creator_name)+1);\n    SHA1_Update (&ctx, commit->desc, strlen(commit->desc)+1);\n\n    /* convert to network byte order */\n    ctime_n = hton64 (commit->ctime);\n    SHA1_Update (&ctx, &ctime_n, sizeof(ctime_n));\n    SHA1_Final (sha1, &ctx);\n    \n    rawdata_to_hex (sha1, commit->commit_id, 20);\n}\n\nSeafCommit*\nseaf_commit_new (const char *commit_id,\n                 const char *repo_id,\n                 const char *root_id,\n                 const char *creator_name,\n                 const char *creator_id,\n                 const char *desc,\n                 guint64 ctime)\n{\n    SeafCommit *commit;\n\n    
g_return_val_if_fail (repo_id != NULL, NULL);\n    g_return_val_if_fail (root_id != NULL && creator_id != NULL, NULL);\n\n    commit = g_new0 (SeafCommit, 1);\n\n    memcpy (commit->repo_id, repo_id, 36);\n    commit->repo_id[36] = '\\0';\n    \n    memcpy (commit->root_id, root_id, 40);\n    commit->root_id[40] = '\\0';\n\n    commit->creator_name = g_strdup (creator_name);\n\n    memcpy (commit->creator_id, creator_id, 40);\n    commit->creator_id[40] = '\\0';\n\n    commit->desc = g_strdup (desc);\n    \n    if (ctime == 0) {\n        /* TODO: use more precise timer */\n        commit->ctime = (gint64)time(NULL);\n    } else\n        commit->ctime = ctime;\n\n    if (commit_id == NULL)\n        compute_commit_id (commit);\n    else {\n        memcpy (commit->commit_id, commit_id, 40);\n        commit->commit_id[40] = '\\0';        \n    }\n\n    commit->ref = 1;\n    return commit;\n}\n\nchar *\nseaf_commit_to_data (SeafCommit *commit, gsize *len)\n{\n    json_t *object;\n    char *json_data;\n    char *ret;\n\n    object = commit_to_json_object (commit);\n\n    json_data = json_dumps (object, 0);\n    *len = strlen (json_data);\n    json_decref (object);\n\n    ret = g_strdup (json_data);\n    free (json_data);\n    return ret;\n}\n\nSeafCommit *\nseaf_commit_from_data (const char *id, char *data, gsize len)\n{\n    json_t *object;\n    SeafCommit *commit;\n    json_error_t jerror;\n\n    object = json_loadb (data, len, 0, &jerror);\n    if (!object) {\n        /* Perhaps the commit object contains invalid UTF-8 character. 
*/\n        if (data[len-1] == 0)\n            clean_utf8_data (data, len - 1);\n        else\n            clean_utf8_data (data, len);\n\n        object = json_loadb (data, len, 0, &jerror);\n        if (!object) {\n            if (jerror.text)\n                seaf_warning (\"Failed to load commit json: %s.\\n\", jerror.text);\n            else\n                seaf_warning (\"Failed to load commit json.\\n\");\n            return NULL;\n        }\n    }\n\n    commit = commit_from_json_object (id, object);\n\n    json_decref (object);\n\n    return commit;\n}\n\nstatic void\nseaf_commit_free (SeafCommit *commit)\n{\n    g_free (commit->desc);\n    g_free (commit->creator_name);\n    if (commit->parent_id) g_free (commit->parent_id);\n    if (commit->second_parent_id) g_free (commit->second_parent_id);\n    if (commit->repo_name) g_free (commit->repo_name);\n    if (commit->repo_desc) g_free (commit->repo_desc);\n    if (commit->device_name) g_free (commit->device_name);\n    if (commit->repo_category) g_free (commit->repo_category);\n    if (commit->salt) g_free (commit->salt);\n    g_free (commit->client_version);\n    g_free (commit->magic);\n    g_free (commit->random_key);\n    g_free (commit->pwd_hash);\n    g_free (commit->pwd_hash_algo);\n    g_free (commit->pwd_hash_params);\n    g_free (commit);\n}\n\nvoid\nseaf_commit_ref (SeafCommit *commit)\n{\n    commit->ref++;\n}\n\nvoid\nseaf_commit_unref (SeafCommit *commit)\n{\n    if (!commit)\n        return;\n\n    if (--commit->ref <= 0)\n        seaf_commit_free (commit);\n}\n\nSeafCommitManager*\nseaf_commit_manager_new (SeafileSession *seaf)\n{\n    SeafCommitManager *mgr = g_new0 (SeafCommitManager, 1);\n\n    mgr->priv = g_new0 (SeafCommitManagerPriv, 1);\n    mgr->seaf = seaf;\n    mgr->obj_store = seaf_obj_store_new (mgr->seaf, \"commits\");\n\n    return mgr;\n}\n\nint\nseaf_commit_manager_init (SeafCommitManager *mgr)\n{\n    if (seaf_obj_store_init (mgr->obj_store) < 0) {\n        seaf_warning 
(\"[commit mgr] Failed to init commit object store.\\n\");\n        return -1;\n    }\n    return 0;\n}\n\n#if 0\ninline static void\nadd_commit_to_cache (SeafCommitManager *mgr, SeafCommit *commit)\n{\n    g_hash_table_insert (mgr->priv->commit_cache,\n                         g_strdup(commit->commit_id),\n                         commit);\n    seaf_commit_ref (commit);\n}\n\ninline static void\nremove_commit_from_cache (SeafCommitManager *mgr, SeafCommit *commit)\n{\n    g_hash_table_remove (mgr->priv->commit_cache, commit->commit_id);\n    seaf_commit_unref (commit);\n}\n#endif\n\nint\nseaf_commit_manager_add_commit (SeafCommitManager *mgr,\n                                SeafCommit *commit)\n{\n    int ret;\n\n    /* add_commit_to_cache (mgr, commit); */\n    if ((ret = save_commit (mgr, commit->repo_id, commit->version, commit)) < 0)\n        return -1;\n    \n    return 0;\n}\n\nvoid\nseaf_commit_manager_del_commit (SeafCommitManager *mgr,\n                                const char *repo_id,\n                                int version,\n                                const char *id)\n{\n    g_return_if_fail (id != NULL);\n\n#if 0\n    commit = g_hash_table_lookup(mgr->priv->commit_cache, id);\n    if (!commit)\n        goto delete;\n\n    /*\n     * Catch ref count bug here. We have bug in commit ref, the\n     * following assert can't pass. 
TODO: fix the commit ref bug\n     */\n    /* g_assert (commit->ref <= 1); */\n    remove_commit_from_cache (mgr, commit);\n\ndelete:\n#endif\n\n    delete_commit (mgr, repo_id, version, id);\n}\n\nSeafCommit* \nseaf_commit_manager_get_commit (SeafCommitManager *mgr,\n                                const char *repo_id,\n                                int version,\n                                const char *id)\n{\n    SeafCommit *commit;\n\n#if 0\n    commit = g_hash_table_lookup (mgr->priv->commit_cache, id);\n    if (commit != NULL) {\n        seaf_commit_ref (commit);\n        return commit;\n    }\n#endif\n\n    commit = load_commit (mgr, repo_id, version, id);\n    if (!commit)\n        return NULL;\n\n    /* add_commit_to_cache (mgr, commit); */\n\n    return commit;\n}\n\nSeafCommit *\nseaf_commit_manager_get_commit_compatible (SeafCommitManager *mgr,\n                                           const char *repo_id,\n                                           const char *id)\n{\n    SeafCommit *commit = NULL;\n\n    /* First try version 1 layout. */\n    commit = seaf_commit_manager_get_commit (mgr, repo_id, 1, id);\n    if (commit)\n        return commit;\n\n#if defined MIGRATION || defined SEAFILE_CLIENT\n    /* For compatibility with version 0. */\n    commit = seaf_commit_manager_get_commit (mgr, repo_id, 0, id);\n#endif\n    return commit;\n}\n\nstatic gint\ncompare_commit_by_time (gconstpointer a, gconstpointer b, gpointer unused)\n{\n    const SeafCommit *commit_a = a;\n    const SeafCommit *commit_b = b;\n\n    /* Latest commit comes first in the list. 
*/\n    return (commit_b->ctime - commit_a->ctime);\n}\n\ninline static int\ninsert_parent_commit (GList **list, GHashTable *hash,\n                      const char *repo_id, int version,\n                      const char *parent_id, gboolean allow_truncate)\n{\n    SeafCommit *p;\n    char *key;\n\n    if (g_hash_table_lookup (hash, parent_id) != NULL)\n        return 0;\n\n    p = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                                        repo_id, version,\n                                        parent_id);\n    if (!p) {\n        if (allow_truncate)\n            return 0;\n        seaf_warning (\"Failed to find commit %s\\n\", parent_id);\n        return -1;\n    }\n\n    *list = g_list_insert_sorted_with_data (*list, p,\n                                           compare_commit_by_time,\n                                           NULL);\n\n    key = g_strdup (parent_id);\n    g_hash_table_replace (hash, key, key);\n\n    return 0;\n}\n\ngboolean\nseaf_commit_manager_traverse_commit_tree_with_limit (SeafCommitManager *mgr,\n                                                     const char *repo_id,\n                                                     int version,\n                                                     const char *head,\n                                                     CommitTraverseFunc func,\n                                                     int limit,\n                                                     void *data,\n                                                     char **next_start_commit,\n                                                     gboolean skip_errors)\n{\n    SeafCommit *commit;\n    GList *list = NULL;\n    GHashTable *commit_hash;\n    gboolean ret = TRUE;\n\n    /* A hash table for recording id of traversed commits. 
*/\n    commit_hash = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL);\n\n    commit = seaf_commit_manager_get_commit (mgr, repo_id, version, head);\n    if (!commit) {\n        seaf_warning (\"Failed to find commit %s.\\n\", head);\n        g_hash_table_destroy (commit_hash);\n        return FALSE;\n    }\n\n    list = g_list_insert_sorted_with_data (list, commit,\n                                           compare_commit_by_time,\n                                           NULL);\n\n    char *key = g_strdup (commit->commit_id);\n    g_hash_table_replace (commit_hash, key, key);\n\n    int count = 0;\n    while (list) {\n        gboolean stop = FALSE;\n        commit = list->data;\n        list = g_list_delete_link (list, list);\n\n        if (!func (commit, data, &stop)) {\n            if (!skip_errors) {\n                seaf_commit_unref (commit);\n                ret = FALSE;\n                goto out;\n            }\n        }\n\n        if (stop) {\n            seaf_commit_unref (commit);\n            /* stop traverse down from this commit,\n             * but not stop traversing the tree \n             */\n            continue;\n        }\n\n        if (commit->parent_id) {\n            if (insert_parent_commit (&list, commit_hash, repo_id, version,\n                                      commit->parent_id, FALSE) < 0) {\n                if (!skip_errors) {\n                    seaf_commit_unref (commit);\n                    ret = FALSE;\n                    goto out;\n                }\n            }\n        }\n        if (commit->second_parent_id) {\n            if (insert_parent_commit (&list, commit_hash, repo_id, version,\n                                      commit->second_parent_id, FALSE) < 0) {\n                if (!skip_errors) {\n                    seaf_commit_unref (commit);\n                    ret = FALSE;\n                    goto out;\n                }\n            }\n        }\n        seaf_commit_unref (commit);\n\n       
 /* Stop when limit is reached and don't stop at unmerged branch.\n         * If limit < 0, there is no limit;\n         */\n        if (limit > 0 && ++count >= limit && (!list || !list->next)) {\n            break;\n        }\n    }\n    /*\n     * two scenarios:\n     * 1. list is empty, indicate scan end\n     * 2. list only have one commit, as start for next scan\n     */\n    if (list) {\n        commit = list->data;\n        if (next_start_commit) {\n            *next_start_commit= g_strdup (commit->commit_id);\n        }\n        seaf_commit_unref (commit);\n        list = g_list_delete_link (list, list);\n    }\n\nout:\n    g_hash_table_destroy (commit_hash);\n    while (list) {\n        commit = list->data;\n        seaf_commit_unref (commit);\n        list = g_list_delete_link (list, list);\n    }\n    return ret;\n}\n\nstatic gboolean\ntraverse_commit_tree_common (SeafCommitManager *mgr,\n                             const char *repo_id,\n                             int version,\n                             const char *head,\n                             CommitTraverseFunc func,\n                             void *data,\n                             gboolean skip_errors,\n                             gboolean allow_truncate)\n{\n    SeafCommit *commit;\n    GList *list = NULL;\n    GHashTable *commit_hash;\n    gboolean ret = TRUE;\n\n    commit = seaf_commit_manager_get_commit (mgr, repo_id, version, head);\n    if (!commit) {\n        seaf_warning (\"Failed to find commit %s.\\n\", head);\n        // For head commit damaged, directly return FALSE\n        // user can repair head by fsck then retraverse the tree\n        return FALSE;\n    }\n\n    /* A hash table for recording id of traversed commits. 
*/\n    commit_hash = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL);\n\n    list = g_list_insert_sorted_with_data (list, commit,\n                                           compare_commit_by_time,\n                                           NULL);\n\n    char *key = g_strdup (commit->commit_id);\n    g_hash_table_replace (commit_hash, key, key);\n\n    while (list) {\n        gboolean stop = FALSE;\n        commit = list->data;\n        list = g_list_delete_link (list, list);\n\n        if (!func (commit, data, &stop)) {\n            seaf_warning(\"[comit-mgr] CommitTraverseFunc failed\\n\");\n\n            /* If skip errors, continue to traverse parents. */\n            if (!skip_errors) {\n                seaf_commit_unref (commit);\n                ret = FALSE;\n                goto out;\n            }\n        }\n        if (stop) {\n            seaf_commit_unref (commit);\n            /* stop traverse down from this commit,\n             * but not stop traversing the tree \n             */\n            continue;\n        }\n\n        if (commit->parent_id) {\n            if (insert_parent_commit (&list, commit_hash, repo_id, version,\n                                      commit->parent_id, allow_truncate) < 0) {\n                seaf_warning(\"[comit-mgr] insert parent commit failed\\n\");\n\n                /* If skip errors, try insert second parent. 
*/\n                if (!skip_errors) {\n                    seaf_commit_unref (commit);\n                    ret = FALSE;\n                    goto out;\n                }\n            }\n        }\n        if (commit->second_parent_id) {\n            if (insert_parent_commit (&list, commit_hash, repo_id, version,\n                                      commit->second_parent_id, allow_truncate) < 0) {\n                seaf_warning(\"[comit-mgr]insert second parent commit failed\\n\");\n\n                if (!skip_errors) {\n                    seaf_commit_unref (commit);\n                    ret = FALSE;\n                    goto out;\n                }\n            }\n        }\n        seaf_commit_unref (commit);\n    }\n\nout:\n    g_hash_table_destroy (commit_hash);\n    while (list) {\n        commit = list->data;\n        seaf_commit_unref (commit);\n        list = g_list_delete_link (list, list);\n    }\n    return ret;\n}\n\ngboolean\nseaf_commit_manager_traverse_commit_tree (SeafCommitManager *mgr,\n                                          const char *repo_id,\n                                          int version,\n                                          const char *head,\n                                          CommitTraverseFunc func,\n                                          void *data,\n                                          gboolean skip_errors)\n{\n    return traverse_commit_tree_common (mgr, repo_id, version, head,\n                                        func, data, skip_errors, FALSE);\n}\n\ngboolean\nseaf_commit_manager_traverse_commit_tree_truncated (SeafCommitManager *mgr,\n                                                    const char *repo_id,\n                                                    int version,\n                                                    const char *head,\n                                                    CommitTraverseFunc func,\n                                                    void *data,\n              
                                      gboolean skip_errors)\n{\n    return traverse_commit_tree_common (mgr, repo_id, version, head,\n                                        func, data, skip_errors, TRUE);\n}\n\ngboolean\nseaf_commit_manager_commit_exists (SeafCommitManager *mgr,\n                                   const char *repo_id,\n                                   int version,\n                                   const char *id)\n{\n#if 0\n    commit = g_hash_table_lookup (mgr->priv->commit_cache, id);\n    if (commit != NULL)\n        return TRUE;\n#endif\n\n    return seaf_obj_store_obj_exists (mgr->obj_store, repo_id, version, id);\n}\n\nstatic json_t *\ncommit_to_json_object (SeafCommit *commit)\n{\n    json_t *object;\n    \n    object = json_object ();\n \n    json_object_set_string_member (object, \"commit_id\", commit->commit_id);\n    json_object_set_string_member (object, \"root_id\", commit->root_id);\n    json_object_set_string_member (object, \"repo_id\", commit->repo_id);\n    if (commit->creator_name)\n        json_object_set_string_member (object, \"creator_name\", commit->creator_name);\n    json_object_set_string_member (object, \"creator\", commit->creator_id);\n    json_object_set_string_member (object, \"description\", commit->desc);\n    json_object_set_int_member (object, \"ctime\", (gint64)commit->ctime);\n    json_object_set_string_or_null_member (object, \"parent_id\", commit->parent_id);\n    json_object_set_string_or_null_member (object, \"second_parent_id\",\n                                           commit->second_parent_id);\n    /*\n     * also save repo's properties to commit file, for easy sharing of\n     * repo info \n     */\n    json_object_set_string_member (object, \"repo_name\", commit->repo_name);\n    json_object_set_string_member (object, \"repo_desc\",\n                                   commit->repo_desc);\n    json_object_set_string_or_null_member (object, \"repo_category\",\n                                     
      commit->repo_category);\n    if (commit->device_name)\n        json_object_set_string_member (object, \"device_name\", commit->device_name);\n    if (commit->client_version)\n        json_object_set_string_member (object, \"client_version\", commit->client_version);\n\n    if (commit->encrypted)\n        json_object_set_string_member (object, \"encrypted\", \"true\");\n\n    if (commit->encrypted) {\n        json_object_set_int_member (object, \"enc_version\", commit->enc_version);\n        // If pwd_hash is set, the magic field is no longer included in the commit of the newly created repo.\n        if (commit->enc_version >= 1 && !commit->pwd_hash)\n            json_object_set_string_member (object, \"magic\", commit->magic);\n        if (commit->enc_version >= 2)\n            json_object_set_string_member (object, \"key\", commit->random_key);\n        if (commit->enc_version >= 3)\n            json_object_set_string_member (object, \"salt\", commit->salt);\n        if (commit->pwd_hash) {\n            json_object_set_string_member (object, \"pwd_hash\", commit->pwd_hash);\n            json_object_set_string_member (object, \"pwd_hash_algo\", commit->pwd_hash_algo);\n            json_object_set_string_member (object, \"pwd_hash_params\", commit->pwd_hash_params);\n        }\n    }\n    if (commit->no_local_history)\n        json_object_set_int_member (object, \"no_local_history\", 1);\n    if (commit->version != 0)\n        json_object_set_int_member (object, \"version\", commit->version);\n    if (commit->conflict)\n        json_object_set_int_member (object, \"conflict\", 1);\n    if (commit->new_merge)\n        json_object_set_int_member (object, \"new_merge\", 1);\n    if (commit->repaired)\n        json_object_set_int_member (object, \"repaired\", 1);\n\n    return object;\n}\n\nstatic SeafCommit *\ncommit_from_json_object (const char *commit_id, json_t *object)\n{\n    SeafCommit *commit = NULL;\n    const char *root_id;\n    const char *repo_id;\n    
const char *creator_name = NULL;\n    const char *creator;\n    const char *desc;\n    gint64 ctime;\n    const char *parent_id, *second_parent_id;\n    const char *repo_name;\n    const char *repo_desc;\n    const char *repo_category;\n    const char *device_name;\n    const char *client_version;\n    const char *encrypted = NULL;\n    int enc_version = 0;\n    const char *magic = NULL;\n    const char *random_key = NULL;\n    const char *salt = NULL;\n    const char *pwd_hash = NULL;\n    const char *pwd_hash_algo = NULL;\n    const char *pwd_hash_params = NULL;\n    int no_local_history = 0;\n    int version = 0;\n    int conflict = 0, new_merge = 0;\n    int repaired = 0;\n\n    root_id = json_object_get_string_member (object, \"root_id\");\n    repo_id = json_object_get_string_member (object, \"repo_id\");\n    if (json_object_has_member (object, \"creator_name\"))\n        creator_name = json_object_get_string_or_null_member (object, \"creator_name\");\n    creator = json_object_get_string_member (object, \"creator\");\n    desc = json_object_get_string_member (object, \"description\");\n    if (!desc)\n        desc = \"\";\n    ctime = (guint64) json_object_get_int_member (object, \"ctime\");\n    parent_id = json_object_get_string_or_null_member (object, \"parent_id\");\n    second_parent_id = json_object_get_string_or_null_member (object, \"second_parent_id\");\n\n    repo_name = json_object_get_string_member (object, \"repo_name\");\n    if (!repo_name)\n        repo_name = \"\";\n    repo_desc = json_object_get_string_member (object, \"repo_desc\");\n    if (!repo_desc)\n        repo_desc = \"\";\n    repo_category = json_object_get_string_or_null_member (object, \"repo_category\");\n    device_name = json_object_get_string_or_null_member (object, \"device_name\");\n    client_version = json_object_get_string_or_null_member (object, \"client_version\");\n\n    if (json_object_has_member (object, \"encrypted\"))\n        encrypted = 
json_object_get_string_or_null_member (object, \"encrypted\");\n\n    if (encrypted && strcmp(encrypted, \"true\") == 0\n        && json_object_has_member (object, \"enc_version\")) {\n        enc_version = json_object_get_int_member (object, \"enc_version\");\n        magic = json_object_get_string_member (object, \"magic\");\n        pwd_hash = json_object_get_string_member (object, \"pwd_hash\");\n        pwd_hash_algo = json_object_get_string_member (object, \"pwd_hash_algo\");\n        pwd_hash_params = json_object_get_string_member (object, \"pwd_hash_params\");\n    }\n\n    if (enc_version >= 2)\n        random_key = json_object_get_string_member (object, \"key\");\n    if (enc_version >= 3)\n        salt = json_object_get_string_member (object, \"salt\");\n\n    if (json_object_has_member (object, \"no_local_history\"))\n        no_local_history = json_object_get_int_member (object, \"no_local_history\");\n\n    if (json_object_has_member (object, \"version\"))\n        version = json_object_get_int_member (object, \"version\");\n    if (json_object_has_member (object, \"new_merge\"))\n        new_merge = json_object_get_int_member (object, \"new_merge\");\n\n    if (json_object_has_member (object, \"conflict\"))\n        conflict = json_object_get_int_member (object, \"conflict\");\n\n    if (json_object_has_member (object, \"repaired\"))\n        repaired = json_object_get_int_member (object, \"repaired\");\n\n\n    /* sanity check for incoming values. 
*/\n    if (!repo_id || !is_uuid_valid(repo_id)  ||\n        !root_id || !is_object_id_valid(root_id) ||\n        !creator || strlen(creator) != 40 ||\n        (parent_id && !is_object_id_valid(parent_id)) ||\n        (second_parent_id && !is_object_id_valid(second_parent_id)))\n        return commit;\n\n    // If pwd_hash is set, the magic field is no longer included in the commit of the newly created repo.\n    if (!magic)\n        magic = pwd_hash;\n\n    switch (enc_version) {\n    case 0:\n        break;\n    case 1:\n        if (!magic || strlen(magic) != 32)\n            return NULL;\n        break;\n    case 2:\n        if (!magic || strlen(magic) != 64)\n            return NULL;\n        if (!random_key || strlen(random_key) != 96)\n            return NULL;\n        break;\n    case 3:\n        if (!magic || strlen(magic) != 64)\n            return NULL;\n        if (!random_key || strlen(random_key) != 96)\n            return NULL;\n        if (!salt || strlen(salt) != 64)\n            return NULL;\n        break;\n    case 4:\n        if (!magic || strlen(magic) != 64)\n            return NULL;\n        if (!random_key || strlen(random_key) != 96)\n            return NULL;\n        if (!salt || strlen(salt) != 64)\n            return NULL;\n        break;\n    default:\n        seaf_warning (\"Unknown encryption version %d.\\n\", enc_version);\n        return NULL;\n    }\n\n    char *creator_name_l = creator_name ? g_ascii_strdown (creator_name, -1) : NULL;\n    commit = seaf_commit_new (commit_id, repo_id, root_id,\n                              creator_name_l, creator, desc, ctime);\n    g_free (creator_name_l);\n\n    commit->parent_id = parent_id ? g_strdup(parent_id) : NULL;\n    commit->second_parent_id = second_parent_id ? 
g_strdup(second_parent_id) : NULL;\n\n    commit->repo_name = g_strdup(repo_name);\n    commit->repo_desc = g_strdup(repo_desc);\n    if (encrypted && strcmp(encrypted, \"true\") == 0)\n        commit->encrypted = TRUE;\n    else\n        commit->encrypted = FALSE;\n    if (repo_category)\n        commit->repo_category = g_strdup(repo_category);\n    commit->device_name = g_strdup(device_name);\n    commit->client_version = g_strdup(client_version);\n\n    if (commit->encrypted) {\n        commit->enc_version = enc_version;\n        if (enc_version >= 1 && !pwd_hash)\n            commit->magic = g_strdup(magic);\n        if (enc_version >= 2)\n            commit->random_key = g_strdup (random_key);\n        if (enc_version >= 3)\n            commit->salt = g_strdup(salt);\n        if (pwd_hash) {\n            commit->pwd_hash = g_strdup (pwd_hash);\n            commit->pwd_hash_algo = g_strdup (pwd_hash_algo);\n            commit->pwd_hash_params = g_strdup (pwd_hash_params);\n        }\n    }\n    if (no_local_history)\n        commit->no_local_history = TRUE;\n    commit->version = version;\n    if (new_merge)\n        commit->new_merge = TRUE;\n    if (conflict)\n        commit->conflict = TRUE;\n    if (repaired)\n        commit->repaired = TRUE;\n\n    return commit;\n}\n\nstatic SeafCommit *\nload_commit (SeafCommitManager *mgr,\n             const char *repo_id,\n             int version,\n             const char *commit_id)\n{\n    char *data = NULL;\n    int len;\n    SeafCommit *commit = NULL;\n    json_t *object = NULL;\n    json_error_t jerror;\n\n    if (!commit_id || strlen(commit_id) != 40)\n        return NULL;\n\n    if (seaf_obj_store_read_obj (mgr->obj_store, repo_id, version,\n                                 commit_id, (void **)&data, &len) < 0)\n        return NULL;\n\n    object = json_loadb (data, len, 0, &jerror);\n    if (!object) {\n        /* Perhaps the commit object contains invalid UTF-8 character. 
*/\n        if (data[len-1] == 0)\n            clean_utf8_data (data, len - 1);\n        else\n            clean_utf8_data (data, len);\n\n        object = json_loadb (data, len, 0, &jerror);\n        if (!object) {\n            if (jerror.text)\n                seaf_warning (\"Failed to load commit json object: %s.\\n\", jerror.text);\n            else\n                seaf_warning (\"Failed to load commit json object.\\n\");\n            goto out;\n        }\n    }\n\n    commit = commit_from_json_object (commit_id, object);\n    if (commit)\n        commit->manager = mgr;\n\nout:\n    if (object) json_decref (object);\n    g_free (data);\n\n    return commit;\n}\n\nstatic int\nsave_commit (SeafCommitManager *manager,\n             const char *repo_id,\n             int version,\n             SeafCommit *commit)\n{\n    json_t *object = NULL;\n    char *data;\n    gsize len;\n\n    if (seaf_obj_store_obj_exists (manager->obj_store,\n                                   repo_id, version,\n                                   commit->commit_id))\n        return 0;\n\n    object = commit_to_json_object (commit);\n\n    data = json_dumps (object, 0);\n    len = strlen (data);\n\n    json_decref (object);\n\n#ifdef SEAFILE_SERVER\n    if (seaf_obj_store_write_obj (manager->obj_store,\n                                  repo_id, version,\n                                  commit->commit_id,\n                                  data, (int)len, TRUE) < 0) {\n        g_free (data);\n        return -1;\n    }\n#else\n    if (seaf_obj_store_write_obj (manager->obj_store,\n                                  repo_id, version,\n                                  commit->commit_id,\n                                  data, (int)len, FALSE) < 0) {\n        g_free (data);\n        return -1;\n    }\n#endif\n    free (data);\n\n    return 0;\n}\n\nstatic void\ndelete_commit (SeafCommitManager *mgr,\n               const char *repo_id,\n               int version,\n               const char 
*id)\n{\n    seaf_obj_store_delete_obj (mgr->obj_store, repo_id, version, id);\n}\n\nint\nseaf_commit_manager_remove_store (SeafCommitManager *mgr,\n                                  const char *store_id)\n{\n    return seaf_obj_store_remove_store (mgr->obj_store, store_id);\n}\n"
  },
  {
    "path": "common/commit-mgr.h",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#ifndef SEAF_COMMIT_MGR_H\n#define SEAF_COMMIT_MGR_H\n\nstruct _SeafCommitManager;\ntypedef struct _SeafCommit SeafCommit;\n\n#include <glib/gstdio.h>\n#include \"db.h\"\n\n#include \"obj-store.h\"\n\nstruct _SeafCommit {\n    struct _SeafCommitManager *manager;\n\n    int         ref;\n\n    char        commit_id[41];\n    char        repo_id[37];\n    char        root_id[41];    /* the fs root */\n    char       *desc;\n    char       *creator_name;\n    char        creator_id[41];\n    guint64     ctime;          /* creation time */\n    char       *parent_id;\n    char       *second_parent_id;\n    char       *repo_name;\n    char       *repo_desc;\n    char       *repo_category;\n    char       *device_name;\n    char       *client_version;\n\n    gboolean    encrypted;         \n    int         enc_version;\n    char       *magic;\n    char       *random_key;\n    char       *salt;\n    char       *pwd_hash;\n    char       *pwd_hash_algo;\n    char       *pwd_hash_params;\n    gboolean    no_local_history;\n\n    int         version;\n    gboolean    new_merge;\n    gboolean    conflict;\n    gboolean    repaired;\n};\n\n\n/**\n * @commit_id: if this is NULL, will create a new id.\n * @ctime: if this is 0, will use current time.\n * \n * Any new commit should be added to commit manager before used.\n */\nSeafCommit *\nseaf_commit_new (const char *commit_id,\n                 const char *repo_id,\n                 const char *root_id,\n                 const char *author_name,\n                 const char *creator_id,\n                 const char *desc,\n                 guint64 ctime);\n\nchar *\nseaf_commit_to_data (SeafCommit *commit, gsize *len);\n\nSeafCommit *\nseaf_commit_from_data (const char *id, char *data, gsize len);\n\nvoid\nseaf_commit_ref (SeafCommit *commit);\n\nvoid\nseaf_commit_unref (SeafCommit *commit);\n\n/* Set stop to TRUE if you want to 
stop traversing a branch in the history graph. \n   Note, if currently there are multi branches, this function will be called again. \n   So, set stop to TRUE not always stop traversing the history graph.\n*/\ntypedef gboolean (*CommitTraverseFunc) (SeafCommit *commit, void *data, gboolean *stop);\n\nstruct _SeafileSession;\n\ntypedef struct _SeafCommitManager SeafCommitManager;\ntypedef struct _SeafCommitManagerPriv SeafCommitManagerPriv;\n\nstruct _SeafCommitManager {\n    struct _SeafileSession *seaf;\n\n    sqlite3    *db;\n    struct SeafObjStore *obj_store;\n\n    SeafCommitManagerPriv *priv;\n};\n\nSeafCommitManager *\nseaf_commit_manager_new (struct _SeafileSession *seaf);\n\nint\nseaf_commit_manager_init (SeafCommitManager *mgr);\n\n/**\n * Add a commit to commit manager and persist it to disk.\n * Any new commit should be added to commit manager before used.\n * This function increments ref count of the commit object.\n * Not MT safe.\n */\nint\nseaf_commit_manager_add_commit (SeafCommitManager *mgr, SeafCommit *commit);\n\n/**\n * Delete a commit from commit manager and permanently remove it from disk.\n * A commit object to be deleted should have ref cournt <= 1.\n * Not MT safe.\n */\nvoid\nseaf_commit_manager_del_commit (SeafCommitManager *mgr,\n                                const char *repo_id,\n                                int version,\n                                const char *id);\n\n/**\n * Find a commit object.\n * This function increments ref count of returned object.\n * Not MT safe.\n */\nSeafCommit* \nseaf_commit_manager_get_commit (SeafCommitManager *mgr,\n                                const char *repo_id,\n                                int version,\n                                const char *id);\n\n/**\n * Get a commit object, with compatibility between version 0 and version 1.\n * It will first try to get commit with version 1 layout; if fails, will\n * try version 0 layout for compatibility.\n * This is useful for loading a 
repo. In that case, we don't know the version\n * of the repo before loading its head commit.\n */\nSeafCommit *\nseaf_commit_manager_get_commit_compatible (SeafCommitManager *mgr,\n                                           const char *repo_id,\n                                           const char *id);\n\n/**\n * Traverse the commits DAG start from head in topological order.\n * The ordering is based on commit time.\n * return FALSE if some commits is missing, TRUE otherwise.\n */\ngboolean\nseaf_commit_manager_traverse_commit_tree (SeafCommitManager *mgr,\n                                          const char *repo_id,\n                                          int version,\n                                          const char *head,\n                                          CommitTraverseFunc func,\n                                          void *data,\n                                          gboolean skip_errors);\n\n/*\n * The same as the above function, but stops traverse down if parent commit\n * doesn't exists, instead of returning error.\n */\ngboolean\nseaf_commit_manager_traverse_commit_tree_truncated (SeafCommitManager *mgr,\n                                                    const char *repo_id,\n                                                    int version,\n                                                    const char *head,\n                                                    CommitTraverseFunc func,\n                                                    void *data,\n                                                    gboolean skip_errors);\n\n/**\n * Works the same as seaf_commit_manager_traverse_commit_tree, but stops\n * traversing when a total number of _limit_ commits is reached. 
If\n * limit <= 0, there is no limit\n */\ngboolean\nseaf_commit_manager_traverse_commit_tree_with_limit (SeafCommitManager *mgr,\n                                                     const char *repo_id,\n                                                     int version,\n                                                     const char *head,\n                                                     CommitTraverseFunc func,\n                                                     int limit,\n                                                     void *data,\n                                                     char **next_start_commit,\n                                                     gboolean skip_errors);\n\ngboolean\nseaf_commit_manager_commit_exists (SeafCommitManager *mgr,\n                                   const char *repo_id,\n                                   int version,\n                                   const char *id);\n\nint\nseaf_commit_manager_remove_store (SeafCommitManager *mgr,\n                                  const char *store_id);\n\n#endif\n"
  },
  {
    "path": "common/common.h",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#ifndef COMMON_H\n#define COMMON_H\n\n#ifdef HAVE_CONFIG_H\n #include <config.h>\n#endif\n\n#include <unistd.h>\n#include <stdlib.h>\n#include <stdint.h>             /* uint32_t */\n#include <sys/types.h>          /* size_t */\n#include <errno.h>\n#include <string.h>\n#include <limits.h>\n#include <stdio.h>\n#include <utime.h>\n\n#include <glib.h>\n#include <glib/gstdio.h>\n\n#define EMPTY_SHA1  \"0000000000000000000000000000000000000000\"\n\n#define CURRENT_ENC_VERSION 3\n\n#define DEFAULT_PROTO_VERSION 1\n#define CURRENT_PROTO_VERSION 7\n\n#define CURRENT_REPO_VERSION 1\n\n/* For compatibility with the old protocol, use an UUID for signature.\n * Listen manager on the server will use the new block tx protocol if it\n * receives this signature as \"token\".\n */\n#define BLOCK_PROTOCOL_SIGNATURE \"529319a0-577f-4d6b-a6c3-3c20f56f290c\"\n\n#define SEAF_PATH_MAX 4096\n\n#ifndef ccnet_warning\n#define ccnet_warning(fmt, ...) g_warning(\"%s(%d): \" fmt, __FILE__, __LINE__, ##__VA_ARGS__)\n#endif\n\n#ifndef ccnet_error\n#define ccnet_error(fmt, ...)   g_error(\"%s(%d): \" fmt, __FILE__, __LINE__, ##__VA_ARGS__)\n#endif\n\n#ifndef ccnet_message\n#define ccnet_message(fmt, ...) g_message(\"%s(%d): \" fmt, __FILE__, __LINE__, ##__VA_ARGS__)\n#endif\n\n#ifndef ccnet_debug\n#define ccnet_debug(fmt, ...) g_debug(fmt, ##__VA_ARGS__)\n#endif\n\n#define DEFAULT_CONFIG_DIR \"~/.ccnet\"\n\n#endif\n"
  },
  {
    "path": "common/config-mgr.c",
    "content": "#include \"common.h\"\n#include \"config-mgr.h\"\n#include \"seaf-db.h\"\n#include \"log.h\"\n\nint\nseaf_cfg_manager_init (SeafCfgManager *mgr)\n{\n    char *sql;\n    int db_type = seaf_db_type(mgr->db);\n\n    if (db_type == SEAF_DB_TYPE_MYSQL)\n        sql = \"CREATE TABLE IF NOT EXISTS SeafileConf (\"\n              \"id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, cfg_group VARCHAR(255) NOT NULL,\"\n              \"cfg_key VARCHAR(255) NOT NULL, value VARCHAR(255), property INTEGER) ENGINE=INNODB\";\n    else\n        sql = \"CREATE TABLE IF NOT EXISTS SeafileConf (cfg_group VARCHAR(255) NOT NULL,\"\n              \"cfg_key VARCHAR(255) NOT NULL, value VARCHAR(255), property INTEGER)\";\n\n    if (seaf_db_query (mgr->db, sql) < 0)\n        return -1;\n\n    return 0;\n}\n\nSeafCfgManager *\nseaf_cfg_manager_new (SeafileSession *session)\n{\n    SeafCfgManager *mgr = g_new0 (SeafCfgManager, 1);\n    if (!mgr)\n        return NULL;\n\n    mgr->config = session->config;\n    mgr->db = session->db;\n\n    return mgr;\n}\n\nint\nseaf_cfg_manager_set_config_int (SeafCfgManager *mgr,\n                                 const char *group,\n                                 const char *key,\n                                 int value)\n{\n    char value_str[256];\n\n    snprintf (value_str, sizeof(value_str), \"%d\", value);\n\n    return seaf_cfg_manager_set_config (mgr, group, key, value_str);\n}\n\nint\nseaf_cfg_manager_set_config_int64 (SeafCfgManager *mgr,\n                                   const char *group,\n                                   const char *key,\n                                   gint64 value)\n{\n    char value_str[256];\n\n    snprintf (value_str, sizeof(value_str), \"%\"G_GINT64_FORMAT\"\", value);\n\n    return seaf_cfg_manager_set_config (mgr, group, key, value_str);\n}\n\nint\nseaf_cfg_manager_set_config_string (SeafCfgManager *mgr,\n                                    const char *group,\n                                    
const char *key,\n                                    const char *value)\n{\n    char value_str[256];\n\n    snprintf (value_str, sizeof(value_str), \"%s\", value);\n\n    return seaf_cfg_manager_set_config (mgr, group, key, value_str);\n}\n\nint\nseaf_cfg_manager_set_config_boolean (SeafCfgManager *mgr,\n                                     const char *group,\n                                     const char *key,\n                                     gboolean value)\n{\n    char value_str[256];\n\n    if (value)\n        snprintf (value_str, sizeof(value_str), \"true\");\n    else\n        snprintf (value_str, sizeof(value_str), \"false\");\n\n    return seaf_cfg_manager_set_config (mgr, group, key, value_str);\n}\n\nint\nseaf_cfg_manager_set_config (SeafCfgManager *mgr, const char *group, const char *key, const char *value)\n{\n    gboolean exists, err = FALSE;\n\n    char *sql = \"SELECT 1 FROM SeafileConf WHERE cfg_group=? AND cfg_key=?\";\n    exists = seaf_db_statement_exists(mgr->db, sql, &err,\n                                      2, \"string\", group,\n                                      \"string\", key);\n    if (err) {\n        seaf_warning (\"[db error]Failed to set config [%s:%s] to db.\\n\", group, key);\n        return -1;\n    }\n    if (exists)\n        sql = \"UPDATE SeafileConf SET value=? WHERE cfg_group=? 
AND cfg_key=?\";\n    else\n        sql = \"INSERT INTO SeafileConf (value, cfg_group, cfg_key, property) VALUES \"\n              \"(?,?,?,0)\";\n    if (seaf_db_statement_query (mgr->db, sql, 3,\n                                 \"string\", value, \"string\",\n                                 group, \"string\", key) < 0) {\n        seaf_warning (\"Failed to set config [%s:%s] to db.\\n\", group, key);\n        return -1;\n    }\n\n    return 0;\n}\n\nint\nseaf_cfg_manager_get_config_int (SeafCfgManager *mgr, const char *group, const char *key)\n{\n    int ret;\n    char *invalid = NULL;\n\n    char *value = seaf_cfg_manager_get_config (mgr, group, key);\n    if (!value) {\n        GError *err = NULL;\n        ret = g_key_file_get_integer (mgr->config, group, key, &err);\n        if (err) {\n            ret = -1;\n            g_clear_error(&err);\n        }\n    } else {\n        ret = strtol (value, &invalid, 10);\n        if (*invalid != '\\0') {\n            ret = -1;\n            seaf_warning (\"Value of config [%s:%s] is invalid: [%s]\\n\", group, key, value);\n        }\n        g_free (value);\n    }\n\n    return ret;\n}\n\ngint64\nseaf_cfg_manager_get_config_int64 (SeafCfgManager *mgr, const char *group, const char *key)\n{\n    gint64 ret;\n    char *invalid = NULL;\n\n    char *value = seaf_cfg_manager_get_config (mgr, group, key);\n    if (!value) {\n        GError *err = NULL;\n        ret = g_key_file_get_int64(mgr->config, group, key, &err);\n        if (err) {\n            ret = -1;\n            g_clear_error(&err);\n        }\n    } else {\n        ret = strtoll (value, &invalid, 10);\n        if (*invalid != '\\0') {\n            seaf_warning (\"Value of config [%s:%s] is invalid: [%s]\\n\", group, key, value);\n            ret = -1;\n        }\n        g_free (value);\n    }\n\n    return ret;\n}\n\ngboolean\nseaf_cfg_manager_get_config_boolean (SeafCfgManager *mgr, const char *group, const char *key)\n{\n    gboolean ret;\n\n    char *value = 
seaf_cfg_manager_get_config (mgr, group, key);\n    if (!value) {\n        GError *err = NULL;\n        ret = g_key_file_get_boolean(mgr->config, group, key, &err);\n        if (err) {\n            seaf_warning (\"Config [%s:%s] not set, default is false.\\n\", group, key);\n            ret = FALSE;\n            g_clear_error(&err);\n        }\n    } else {\n        if (strcmp (\"true\", value) == 0)\n            ret = TRUE;\n        else\n            ret = FALSE;\n        g_free (value);\n    }\n\n    return ret;\n}\n\nchar *\nseaf_cfg_manager_get_config_string (SeafCfgManager *mgr, const char *group, const char *key)\n{\n    char *ret = NULL;\n\n    char *value = seaf_cfg_manager_get_config (mgr, group, key);\n    if (!value) {\n        ret = g_key_file_get_string (mgr->config, group, key, NULL);\n        if (ret != NULL)\n            ret = g_strstrip(ret);\n    } else {\n        ret = value;\n    }\n\n    return ret;\n}\n\nchar *\nseaf_cfg_manager_get_config (SeafCfgManager *mgr, const char *group, const char *key)\n{\n    char *sql = \"SELECT value FROM SeafileConf WHERE cfg_group=? AND cfg_key=?\";\n    char *value = seaf_db_statement_get_string(mgr->db, sql, \n                                               2, \"string\", group, \"string\", key);\n    if (value != NULL)\n        value = g_strstrip(value);\n\n    return value;\n}\n"
  },
  {
    "path": "common/config-mgr.h",
    "content": "#ifndef SEAF_CONFIG_MGR_H\n#define SEAF_CONFIG_MGR_H\n\ntypedef struct _SeafCfgManager SeafCfgManager;\n#include \"seafile-session.h\"\n\nstruct _SeafCfgManager {\n    GKeyFile *config;\n    SeafDB *db;\n};\n\ntypedef struct _SeafileSession SeafileSession;\n\nSeafCfgManager *\nseaf_cfg_manager_new (SeafileSession *seaf);\n\nint\nseaf_cfg_manager_set_config (SeafCfgManager *mgr, const char *group, const char *key, const char *value);\n\nchar *\nseaf_cfg_manager_get_config (SeafCfgManager *mgr, const char *group, const char *key);\n\nint\nseaf_cfg_manager_set_config_int (SeafCfgManager *mgr, const char *group, const char *key, int value);\n\nint\nseaf_cfg_manager_get_config_int (SeafCfgManager *mgr, const char *group, const char *key);\n\nint\nseaf_cfg_manager_set_config_int64 (SeafCfgManager *mgr, const char *group, const char *key, gint64 value);\n\ngint64\nseaf_cfg_manager_get_config_int64 (SeafCfgManager *mgr, const char *group, const char *key);\n\nint\nseaf_cfg_manager_set_config_string (SeafCfgManager *mgr, const char *group, const char *key, const char *value);\n\nchar *\nseaf_cfg_manager_get_config_string (SeafCfgManager *mgr, const char *group, const char *key);\n\nint\nseaf_cfg_manager_set_config_boolean (SeafCfgManager *mgr, const char *group, const char *key, gboolean value);\n\ngboolean\nseaf_cfg_manager_get_config_boolean (SeafCfgManager *mgr, const char *group, const char *key);\n\nint\nseaf_cfg_manager_init (SeafCfgManager *mgr);\n\n#endif /* SEAF_CONFIG_MGR_H */\n"
  },
  {
    "path": "common/diff-simple.c",
    "content": "#include \"common.h\"\n#include \"diff-simple.h\"\n#include \"utils.h\"\n#include \"log.h\"\n\nDiffEntry *\ndiff_entry_new (char type, char status, unsigned char *sha1, const char *name)\n{\n    DiffEntry *de = g_new0 (DiffEntry, 1);\n\n    de->type = type;\n    de->status = status;\n    memcpy (de->sha1, sha1, 20);\n    de->name = g_strdup(name);\n\n    return de;\n}\n\nDiffEntry *\ndiff_entry_new_from_dirent (char type, char status,\n                            SeafDirent *dent, const char *basedir)\n{\n    DiffEntry *de = g_new0 (DiffEntry, 1);\n    unsigned char sha1[20];\n    char *path;\n\n    hex_to_rawdata (dent->id, sha1, 20);\n    path = g_strconcat (basedir, dent->name, NULL);\n\n    de->type = type;\n    de->status = status;\n    memcpy (de->sha1, sha1, 20);\n    de->name = path;\n    de->size = dent->size;\n\n#ifdef SEAFILE_CLIENT\n    if (type == DIFF_TYPE_COMMITS &&\n        (status == DIFF_STATUS_ADDED ||\n         status == DIFF_STATUS_MODIFIED ||\n         status == DIFF_STATUS_DIR_ADDED ||\n         status == DIFF_STATUS_DIR_DELETED)) {\n        de->mtime = dent->mtime;\n        de->mode = dent->mode;\n        de->modifier = g_strdup(dent->modifier);\n    }\n#endif\n\n    return de;\n}\n\nvoid\ndiff_entry_free (DiffEntry *de)\n{\n    g_free (de->name);\n    if (de->new_name)\n        g_free (de->new_name);\n\n#ifdef SEAFILE_CLIENT\n    g_free (de->modifier);\n#endif\n\n    g_free (de);\n}\n\ninline static gboolean\ndirent_same (SeafDirent *denta, SeafDirent *dentb)\n{\n    return (strcmp (dentb->id, denta->id) == 0 &&\n\t    denta->mode == dentb->mode &&\n\t    denta->mtime == dentb->mtime);\n}\n\nstatic int\ndiff_files (int n, SeafDirent *dents[], const char *basedir, DiffOptions *opt)\n{\n    SeafDirent *files[3];\n    int i, n_files = 0;\n\n    memset (files, 0, sizeof(files[0])*n);\n    for (i = 0; i < n; ++i) {\n        if (dents[i] && S_ISREG(dents[i]->mode)) {\n            files[i] = dents[i];\n            ++n_files;\n      
  }\n    }\n\n    if (n_files == 0)\n        return 0;\n\n    return opt->file_cb (n, basedir, files, opt->data);\n}\n\nstatic int\ndiff_trees_recursive (int n, SeafDir *trees[],\n                      const char *basedir, DiffOptions *opt);\n\nstatic int\ndiff_directories (int n, SeafDirent *dents[], const char *basedir, DiffOptions *opt)\n{\n    SeafDirent *dirs[3];\n    int i, n_dirs = 0;\n    char *dirname = \"\";\n    int ret;\n    SeafDir *sub_dirs[3], *dir;\n\n    memset (dirs, 0, sizeof(dirs[0])*n);\n    for (i = 0; i < n; ++i) {\n        if (dents[i] && S_ISDIR(dents[i]->mode)) {\n            dirs[i] = dents[i];\n            ++n_dirs;\n        }\n    }\n\n    if (n_dirs == 0)\n        return 0;\n\n    gboolean recurse = TRUE;\n    ret = opt->dir_cb (n, basedir, dirs, opt->data, &recurse);\n    if (ret < 0)\n        return ret;\n\n    if (!recurse)\n        return 0;\n\n    memset (sub_dirs, 0, sizeof(sub_dirs[0])*n);\n    for (i = 0; i < n; ++i) {\n        if (dents[i] != NULL && S_ISDIR(dents[i]->mode)) {\n            dir = seaf_fs_manager_get_seafdir (seaf->fs_mgr,\n                                               opt->store_id,\n                                               opt->version,\n                                               dents[i]->id);\n            if (!dir) {\n                seaf_warning (\"Failed to find dir %s:%s.\\n\",\n                              opt->store_id, dents[i]->id);\n                ret = -1;\n                goto free_sub_dirs;\n            }\n            sub_dirs[i] = dir;\n\n            dirname = dents[i]->name;\n        }\n    }\n\n    char *new_basedir = g_strconcat (basedir, dirname, \"/\", NULL);\n\n    ret = diff_trees_recursive (n, sub_dirs, new_basedir, opt);\n\n    g_free (new_basedir);\n\nfree_sub_dirs:\n    for (i = 0; i < n; ++i)\n        seaf_dir_free (sub_dirs[i]);\n    return ret;\n}\n\nstatic int\ndiff_trees_recursive (int n, SeafDir *trees[],\n                      const char *basedir, DiffOptions 
*opt)\n{\n    GList *ptrs[3];\n    SeafDirent *dents[3];\n    int i;\n    SeafDirent *dent;\n    char *first_name;\n    gboolean done;\n    int ret = 0;\n\n    for (i = 0; i < n; ++i) {\n        if (trees[i])\n            ptrs[i] = trees[i]->entries;\n        else\n            ptrs[i] = NULL;\n    }\n\n    while (1) {\n        first_name = NULL;\n        memset (dents, 0, sizeof(dents[0])*n);\n        done = TRUE;\n\n        /* Find the \"largest\" name, assuming dirents are sorted. */\n        for (i = 0; i < n; ++i) {\n            if (ptrs[i] != NULL) {\n                done = FALSE;\n                dent = ptrs[i]->data;\n                if (!first_name)\n                    first_name = dent->name;\n                else if (strcmp(dent->name, first_name) > 0)\n                    first_name = dent->name;\n            }\n        }\n\n        if (done)\n            break;\n\n        /*\n         * Setup dir entries for all names that equal to first_name\n         */\n        for (i = 0; i < n; ++i) {\n            if (ptrs[i] != NULL) {\n                dent = ptrs[i]->data;\n                if (strcmp(first_name, dent->name) == 0) {\n                    dents[i] = dent;\n                    ptrs[i] = ptrs[i]->next;\n                }\n            }\n        }\n\n        if (n == 2 && dents[0] && dents[1] && dirent_same(dents[0], dents[1]))\n            continue;\n\n        if (n == 3 && dents[0] && dents[1] && dents[2] &&\n            dirent_same(dents[0], dents[1]) && dirent_same(dents[0], dents[2]))\n            continue;\n\n        /* Diff files of this level. */\n        ret = diff_files (n, dents, basedir, opt);\n        if (ret < 0)\n            return ret;\n\n        /* Recurse into sub level. 
*/\n        ret = diff_directories (n, dents, basedir, opt);\n        if (ret < 0)\n            return ret;\n    }\n\n    return ret;\n}\n\nint\ndiff_trees (int n, const char *roots[], DiffOptions *opt)\n{\n    SeafDir **trees, *root;\n    int i, ret;\n\n    g_return_val_if_fail (n == 2 || n == 3, -1);\n\n    trees = g_new0 (SeafDir *, n);\n    for (i = 0; i < n; ++i) {\n        root = seaf_fs_manager_get_seafdir (seaf->fs_mgr,\n                                            opt->store_id,\n                                            opt->version,\n                                            roots[i]);\n        if (!root) {\n            seaf_warning (\"Failed to find dir %s:%s.\\n\", opt->store_id, roots[i]);\n            g_free (trees);\n            return -1;\n        }\n        trees[i] = root;\n    }\n\n    ret = diff_trees_recursive (n, trees, \"\", opt);\n\n    for (i = 0; i < n; ++i)\n        seaf_dir_free (trees[i]);\n    g_free (trees);\n\n    return ret;\n}\n\ntypedef struct DiffData {\n    GList **results;\n    gboolean fold_dir_diff;\n} DiffData;\n\nstatic int\ntwoway_diff_files (int n, const char *basedir, SeafDirent *files[], void *vdata)\n{\n    DiffData *data = vdata;\n    GList **results = data->results;\n    DiffEntry *de;\n    SeafDirent *tree1 = files[0];\n    SeafDirent *tree2 = files[1];\n\n    if (!tree1) {\n        de = diff_entry_new_from_dirent (DIFF_TYPE_COMMITS, DIFF_STATUS_ADDED,\n                                         tree2, basedir);\n        *results = g_list_prepend (*results, de);\n        return 0;\n    }\n\n    if (!tree2) {\n        de = diff_entry_new_from_dirent (DIFF_TYPE_COMMITS, DIFF_STATUS_DELETED,\n                                         tree1, basedir);\n        *results = g_list_prepend (*results, de);\n        return 0;\n    }\n\n    if (!dirent_same (tree1, tree2)) {\n        de = diff_entry_new_from_dirent (DIFF_TYPE_COMMITS, DIFF_STATUS_MODIFIED,\n                                         tree2, basedir);\n        
de->origin_size = tree1->size;\n        *results = g_list_prepend (*results, de);\n    }\n\n    return 0;\n}\n\nstatic int\ntwoway_diff_dirs (int n, const char *basedir, SeafDirent *dirs[], void *vdata,\n                  gboolean *recurse)\n{\n    DiffData *data = vdata;\n    GList **results = data->results;\n    DiffEntry *de;\n    SeafDirent *tree1 = dirs[0];\n    SeafDirent *tree2 = dirs[1];\n\n    if (!tree1) {\n        if (strcmp (tree2->id, EMPTY_SHA1) == 0 || data->fold_dir_diff) {\n            de = diff_entry_new_from_dirent (DIFF_TYPE_COMMITS, DIFF_STATUS_DIR_ADDED,\n                                             tree2, basedir);\n            *results = g_list_prepend (*results, de);\n            *recurse = FALSE;\n        } else\n            *recurse = TRUE;\n        return 0;\n    }\n\n    if (!tree2) {\n        de = diff_entry_new_from_dirent (DIFF_TYPE_COMMITS,\n                                         DIFF_STATUS_DIR_DELETED,\n                                         tree1, basedir);\n        *results = g_list_prepend (*results, de);\n\n        if (data->fold_dir_diff) {\n            *recurse = FALSE;\n        } else\n            *recurse = TRUE;\n        return 0;\n    }\n\n    return 0;\n}\n\nint\ndiff_commits (SeafCommit *commit1, SeafCommit *commit2, GList **results,\n              gboolean fold_dir_diff)\n{\n    SeafRepo *repo = NULL;\n    DiffOptions opt;\n    const char *roots[2];\n\n    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, commit1->repo_id);\n    if (!repo) {\n        seaf_warning (\"Failed to get repo %s.\\n\", commit1->repo_id);\n        return -1;\n    }\n\n    DiffData data;\n    memset (&data, 0, sizeof(data));\n    data.results = results;\n    data.fold_dir_diff = fold_dir_diff;\n\n    memset (&opt, 0, sizeof(opt));\n#ifdef SEAFILE_SERVER\n    memcpy (opt.store_id, repo->store_id, 36);\n#else\n    memcpy (opt.store_id, repo->id, 36);\n#endif\n    opt.version = repo->version;\n    opt.file_cb = twoway_diff_files;\n    
opt.dir_cb = twoway_diff_dirs;\n    opt.data = &data;\n\n#ifdef SEAFILE_SERVER\n    seaf_repo_unref (repo);\n#endif\n\n    roots[0] = commit1->root_id;\n    roots[1] = commit2->root_id;\n\n    diff_trees (2, roots, &opt);\n    diff_resolve_renames (results);\n\n    return 0;\n}\n\nint\ndiff_commit_roots (const char *store_id, int version,\n                   const char *root1, const char *root2, GList **results,\n                   gboolean fold_dir_diff)\n{\n    DiffOptions opt;\n    const char *roots[2];\n\n    DiffData data;\n    memset (&data, 0, sizeof(data));\n    data.results = results;\n    data.fold_dir_diff = fold_dir_diff;\n\n    memset (&opt, 0, sizeof(opt));\n    memcpy (opt.store_id, store_id, 36);\n    opt.version = version;\n    opt.file_cb = twoway_diff_files;\n    opt.dir_cb = twoway_diff_dirs;\n    opt.data = &data;\n\n    roots[0] = root1;\n    roots[1] = root2;\n\n    diff_trees (2, roots, &opt);\n    diff_resolve_renames (results);\n\n    return 0;\n}\n\nstatic int\nthreeway_diff_files (int n, const char *basedir, SeafDirent *files[], void *vdata)\n{\n    DiffData *data = vdata;\n    SeafDirent *m = files[0];\n    SeafDirent *p1 = files[1];\n    SeafDirent *p2 = files[2];\n    GList **results = data->results;\n    DiffEntry *de;\n\n    /* diff m with both p1 and p2. 
*/\n    if (m && p1 && p2) {\n        if (!dirent_same(m, p1) && !dirent_same (m, p2)) {\n            de = diff_entry_new_from_dirent (DIFF_TYPE_COMMITS, DIFF_STATUS_MODIFIED,\n                                             m, basedir);\n            *results = g_list_prepend (*results, de);\n        }\n    } else if (!m && p1 && p2) {\n        de = diff_entry_new_from_dirent (DIFF_TYPE_COMMITS, DIFF_STATUS_DELETED,\n                                         p1, basedir);\n        *results = g_list_prepend (*results, de);\n    } else if (m && !p1 && p2) {\n        if (!dirent_same (m, p2)) {\n            de = diff_entry_new_from_dirent (DIFF_TYPE_COMMITS, DIFF_STATUS_MODIFIED,\n                                             m, basedir);\n            *results = g_list_prepend (*results, de);\n        }\n    } else if (m && p1 && !p2) {\n        if (!dirent_same (m, p1)) {\n            de = diff_entry_new_from_dirent (DIFF_TYPE_COMMITS, DIFF_STATUS_MODIFIED,\n                                             m, basedir);\n            *results = g_list_prepend (*results, de);\n        }\n    } else if (m && !p1 && !p2) {\n        de = diff_entry_new_from_dirent (DIFF_TYPE_COMMITS, DIFF_STATUS_ADDED,\n                                         m, basedir);\n        *results = g_list_prepend (*results, de);\n    }\n    /* Nothing to do for:\n     * 1. !m && p1 && !p2;\n     * 2. !m && !p1 && p2;\n     * 3. 
!m && !p1 && !p2 (should not happen)\n     */\n\n    return 0;\n}\n\nstatic int\nthreeway_diff_dirs (int n, const char *basedir, SeafDirent *dirs[], void *vdata,\n                    gboolean *recurse)\n{\n    *recurse = TRUE;\n    return 0;\n}\n\nint\ndiff_merge (SeafCommit *merge, GList **results, gboolean fold_dir_diff)\n{\n    SeafRepo *repo = NULL;\n    DiffOptions opt;\n    const char *roots[3];\n    SeafCommit *parent1, *parent2;\n\n    g_return_val_if_fail (*results == NULL, -1);\n    g_return_val_if_fail (merge->parent_id != NULL &&\n                          merge->second_parent_id != NULL,\n                          -1);\n\n    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, merge->repo_id);\n    if (!repo) {\n        seaf_warning (\"Failed to get repo %s.\\n\", merge->repo_id);\n        return -1;\n    }\n\n    parent1 = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                                              repo->id,\n                                              repo->version,\n                                              merge->parent_id);\n    if (!parent1) {\n        seaf_warning (\"failed to find commit %s:%s.\\n\", repo->id, merge->parent_id);\n        return -1;\n    }\n\n    parent2 = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                                              repo->id,\n                                              repo->version,\n                                              merge->second_parent_id);\n    if (!parent2) {\n        seaf_warning (\"failed to find commit %s:%s.\\n\",\n                      repo->id, merge->second_parent_id);\n        seaf_commit_unref (parent1);\n        return -1;\n    }\n\n    DiffData data;\n    memset (&data, 0, sizeof(data));\n    data.results = results;\n    data.fold_dir_diff = fold_dir_diff;\n\n    memset (&opt, 0, sizeof(opt));\n#ifdef SEAFILE_SERVER\n    memcpy (opt.store_id, repo->store_id, 36);\n#else\n    memcpy (opt.store_id, repo->id, 36);\n#endif\n    opt.version = 
repo->version;\n    opt.file_cb = threeway_diff_files;\n    opt.dir_cb = threeway_diff_dirs;\n    opt.data = &data;\n\n#ifdef SEAFILE_SERVER\n    seaf_repo_unref (repo);\n#endif\n\n    roots[0] = merge->root_id;\n    roots[1] = parent1->root_id;\n    roots[2] = parent2->root_id;\n\n    int ret = diff_trees (3, roots, &opt);\n    diff_resolve_renames (results);\n\n    seaf_commit_unref (parent1);\n    seaf_commit_unref (parent2);\n\n    return ret;\n}\n\nint\ndiff_merge_roots (const char *store_id, int version,\n                  const char *merged_root, const char *p1_root, const char *p2_root,\n                  GList **results, gboolean fold_dir_diff)\n{\n    DiffOptions opt;\n    const char *roots[3];\n\n    g_return_val_if_fail (*results == NULL, -1);\n\n    DiffData data;\n    memset (&data, 0, sizeof(data));\n    data.results = results;\n    data.fold_dir_diff = fold_dir_diff;\n\n    memset (&opt, 0, sizeof(opt));\n    memcpy (opt.store_id, store_id, 36);\n    opt.version = version;\n    opt.file_cb = threeway_diff_files;\n    opt.dir_cb = threeway_diff_dirs;\n    opt.data = &data;\n\n    roots[0] = merged_root;\n    roots[1] = p1_root;\n    roots[2] = p2_root;\n\n    diff_trees (3, roots, &opt);\n    diff_resolve_renames (results);\n\n    return 0;\n}\n\n/* This function only resolve \"strict\" rename, i.e. two files must be\n * exactly the same.\n * Don't detect rename of empty files and empty dirs.\n */\nvoid\ndiff_resolve_renames (GList **diff_entries)\n{\n    GHashTable *deleted_files = NULL, *deleted_dirs = NULL;\n    GList *p;\n    GList *added = NULL;\n    DiffEntry *de;\n    unsigned char empty_sha1[20];\n    unsigned int deleted_empty_count = 0, deleted_empty_dir_count = 0;\n    unsigned int added_empty_count = 0, added_empty_dir_count = 0;\n    gboolean check_empty_dir, check_empty_file;\n\n    memset (empty_sha1, 0, 20);\n\n    /* Hash and equal functions for raw sha1. 
*/\n    deleted_dirs = g_hash_table_new (ccnet_sha1_hash, ccnet_sha1_equal);\n    deleted_files = g_hash_table_new (ccnet_sha1_hash, ccnet_sha1_equal);\n\n    /* Count deleted and added entries of which content is empty. */\n    for (p = *diff_entries; p != NULL; p = p->next) {\n        de = p->data;\n        if (memcmp (de->sha1, empty_sha1, 20) == 0) {\n            if (de->status == DIFF_STATUS_DELETED)\n                deleted_empty_count++;\n            if (de->status == DIFF_STATUS_DIR_DELETED)\n                deleted_empty_dir_count++;\n            if (de->status == DIFF_STATUS_ADDED)\n                added_empty_count++;\n            if (de->status == DIFF_STATUS_DIR_ADDED)\n                added_empty_dir_count++;\n        }\n    }\n\n    check_empty_dir = (deleted_empty_dir_count == 1 && added_empty_dir_count == 1);\n    check_empty_file = (deleted_empty_count == 1 && added_empty_count == 1);\n\n    /* Collect all \"deleted\" entries. */\n    for (p = *diff_entries; p != NULL; p = p->next) {\n        de = p->data;\n        if (de->status == DIFF_STATUS_DELETED) {\n            if (memcmp (de->sha1, empty_sha1, 20) == 0 &&\n                check_empty_file == FALSE)\n                continue;\n\n            g_hash_table_insert (deleted_files, de->sha1, p);\n        }\n\n        if (de->status == DIFF_STATUS_DIR_DELETED) {\n            if (memcmp (de->sha1, empty_sha1, 20) == 0 &&\n                check_empty_dir == FALSE)\n                continue;\n\n            g_hash_table_insert (deleted_dirs, de->sha1, p);\n        }\n    }\n\n    /* Collect all \"added\" entries into a separate list. 
*/\n    for (p = *diff_entries; p != NULL; p = p->next) {\n        de = p->data;\n        if (de->status == DIFF_STATUS_ADDED) {\n            if (memcmp (de->sha1, empty_sha1, 20) == 0 &&\n                check_empty_file == 0)\n                continue;\n\n            added = g_list_prepend (added, p);\n        }\n\n        if (de->status == DIFF_STATUS_DIR_ADDED) {\n            if (memcmp (de->sha1, empty_sha1, 20) == 0 &&\n                check_empty_dir == 0)\n                continue;\n\n            added = g_list_prepend (added, p);\n        }\n    }\n\n    /* For each \"added\" entry, if we find a \"deleted\" entry with\n     * the same content, we find a rename pair.\n     */\n    p = added;\n    while (p != NULL) {\n        GList *p_add, *p_del;\n        DiffEntry *de_add, *de_del, *de_rename;\n        int rename_status;\n\n        p_add = p->data;\n        de_add = p_add->data;\n\n        if (de_add->status == DIFF_STATUS_ADDED)\n            p_del = g_hash_table_lookup (deleted_files, de_add->sha1);\n        else\n            p_del = g_hash_table_lookup (deleted_dirs, de_add->sha1);\n\n        if (p_del) {\n            de_del = p_del->data;\n\n            if (de_add->status == DIFF_STATUS_DIR_ADDED)\n                rename_status = DIFF_STATUS_DIR_RENAMED;\n            else\n                rename_status = DIFF_STATUS_RENAMED;\n\n            de_rename = diff_entry_new (de_del->type, rename_status, \n                                        de_del->sha1, de_del->name);\n            de_rename->new_name = g_strdup(de_add->name);\n\n            *diff_entries = g_list_delete_link (*diff_entries, p_add);\n            *diff_entries = g_list_delete_link (*diff_entries, p_del);\n            *diff_entries = g_list_prepend (*diff_entries, de_rename);\n\n            if (de_del->status == DIFF_STATUS_DIR_DELETED)\n                g_hash_table_remove (deleted_dirs, de_add->sha1);\n            else\n                g_hash_table_remove (deleted_files, de_add->sha1);\n\n   
         diff_entry_free (de_add);\n            diff_entry_free (de_del);\n        }\n\n        p = g_list_delete_link (p, p);\n    }\n\n    g_hash_table_destroy (deleted_dirs);\n    g_hash_table_destroy (deleted_files);\n}\n\nstatic gboolean\nis_redundant_empty_dir (DiffEntry *de_dir, DiffEntry *de_file)\n{\n    int dir_len;\n\n    if (de_dir->status == DIFF_STATUS_DIR_ADDED &&\n        de_file->status == DIFF_STATUS_DELETED)\n    {\n        dir_len = strlen (de_dir->name);\n        if (strlen (de_file->name) > dir_len &&\n            strncmp (de_dir->name, de_file->name, dir_len) == 0)\n            return TRUE;\n    }\n\n    if (de_dir->status == DIFF_STATUS_DIR_DELETED &&\n        de_file->status == DIFF_STATUS_ADDED)\n    {\n        dir_len = strlen (de_dir->name);\n        if (strlen (de_file->name) > dir_len &&\n            strncmp (de_dir->name, de_file->name, dir_len) == 0)\n            return TRUE;\n    }\n\n    return FALSE;\n}\n\n/*\n * An empty dir entry may be added by deleting all the files under it.\n * Similarly, an empty dir entry may be deleted by adding some file in it.\n * In both cases, we don't want to include the empty dir entry in the\n * diff results.\n */\nvoid\ndiff_resolve_empty_dirs (GList **diff_entries)\n{\n    GList *empty_dirs = NULL;\n    GList *p, *dir, *file;\n    DiffEntry *de, *de_dir, *de_file;\n\n    for (p = *diff_entries; p != NULL; p = p->next) {\n        de = p->data;\n        if (de->status == DIFF_STATUS_DIR_ADDED ||\n            de->status == DIFF_STATUS_DIR_DELETED)\n            empty_dirs = g_list_prepend (empty_dirs, p);\n    }\n\n    for (dir = empty_dirs; dir != NULL; dir = dir->next) {\n        de_dir = ((GList *)dir->data)->data;\n        for (file = *diff_entries; file != NULL; file = file->next) {\n            de_file = file->data;\n            if (is_redundant_empty_dir (de_dir, de_file)) {\n                *diff_entries = g_list_delete_link (*diff_entries, dir->data);\n                break;\n            }\n 
       }\n    }\n\n    g_list_free (empty_dirs);\n}\n\nint diff_unmerged_state(int mask)\n{\n    mask >>= 1;\n    switch (mask) {\n        case 7:\n            return STATUS_UNMERGED_BOTH_CHANGED;\n        case 3:\n            return STATUS_UNMERGED_OTHERS_REMOVED;\n        case 5:\n            return STATUS_UNMERGED_I_REMOVED;\n        case 6:\n            return STATUS_UNMERGED_BOTH_ADDED;\n        case 2:\n            return STATUS_UNMERGED_DFC_I_ADDED_FILE;\n        case 4:\n            return STATUS_UNMERGED_DFC_OTHERS_ADDED_FILE;\n        default:\n            seaf_warning (\"Unexpected unmerged case\\n\");\n    }\n    return 0;\n}\n\nchar *\nformat_diff_results(GList *results)\n{\n    GList *ptr;\n    GString *fmt_status;\n    DiffEntry *de;\n\n    fmt_status = g_string_new(\"\");\n\n    for (ptr = results; ptr; ptr = ptr->next) {\n        de = ptr->data;\n\n        if (de->status != DIFF_STATUS_RENAMED)\n            g_string_append_printf(fmt_status, \"%c %c %d %u %s\\n\",\n                                   de->type, de->status, de->unmerge_state,\n                                   (int)strlen(de->name), de->name);\n        else\n            g_string_append_printf(fmt_status, \"%c %c %d %u %s %u %s\\n\",\n                                   de->type, de->status, de->unmerge_state,\n                                   (int)strlen(de->name), de->name,\n                                   (int)strlen(de->new_name), de->new_name);\n    }\n\n    return g_string_free(fmt_status, FALSE);\n}\n\ninline static char *\nget_basename (char *path)\n{\n    char *slash;\n    slash = strrchr (path, '/');\n    if (!slash)\n        return path;\n    return (slash + 1);\n}\n\nchar *\ndiff_results_to_description (GList *results)\n{\n    GList *p;\n    DiffEntry *de;\n    char *add_mod_file = NULL, *removed_file = NULL;\n    char *renamed_file = NULL, *renamed_dir = NULL;\n    char *new_dir = NULL, *removed_dir = NULL;\n    int n_add_mod = 0, n_removed = 0, n_renamed = 0;\n    
int n_new_dir = 0, n_removed_dir = 0, n_renamed_dir = 0;\n    GString *desc;\n\n    if (results == NULL)\n        return NULL;\n\n    for (p = results; p != NULL; p = p->next) {\n        de = p->data;\n        switch (de->status) {\n        case DIFF_STATUS_ADDED:\n            if (n_add_mod == 0)\n                add_mod_file = get_basename(de->name);\n            n_add_mod++;\n            break;\n        case DIFF_STATUS_DELETED:\n            if (n_removed == 0)\n                removed_file = get_basename(de->name);\n            n_removed++;\n            break;\n        case DIFF_STATUS_RENAMED:\n            if (n_renamed == 0)\n                renamed_file = get_basename(de->name);\n            n_renamed++;\n            break;\n        case DIFF_STATUS_MODIFIED:\n            if (n_add_mod == 0)\n                add_mod_file = get_basename(de->name);\n            n_add_mod++;\n            break;\n        case DIFF_STATUS_DIR_ADDED:\n            if (n_new_dir == 0)\n                new_dir = get_basename(de->name);\n            n_new_dir++;\n            break;\n        case DIFF_STATUS_DIR_DELETED:\n            if (n_removed_dir == 0)\n                removed_dir = get_basename(de->name);\n            n_removed_dir++;\n            break;\n        case DIFF_STATUS_DIR_RENAMED:\n            if (n_renamed_dir == 0)\n                renamed_dir = get_basename(de->name);\n            n_renamed_dir++;\n            break;\n        }\n    }\n\n    desc = g_string_new (\"\");\n\n    if (n_add_mod == 1)\n        g_string_append_printf (desc, \"Added or modified \\\"%s\\\".\\n\", add_mod_file);\n    else if (n_add_mod > 1)\n        g_string_append_printf (desc, \"Added or modified \\\"%s\\\" and %d more files.\\n\",\n                                add_mod_file, n_add_mod - 1);\n\n    if (n_removed == 1)\n        g_string_append_printf (desc, \"Deleted \\\"%s\\\".\\n\", removed_file);\n    else if (n_removed > 1)\n        g_string_append_printf (desc, \"Deleted \\\"%s\\\" 
and %d more files.\\n\",\n                                removed_file, n_removed - 1);\n\n    if (n_renamed == 1)\n        g_string_append_printf (desc, \"Renamed \\\"%s\\\".\\n\", renamed_file);\n    else if (n_renamed > 1)\n        g_string_append_printf (desc, \"Renamed \\\"%s\\\" and %d more files.\\n\",\n                                renamed_file, n_renamed - 1);\n\n    if (n_new_dir == 1)\n        g_string_append_printf (desc, \"Added directory \\\"%s\\\".\\n\", new_dir);\n    else if (n_new_dir > 1)\n        g_string_append_printf (desc, \"Added \\\"%s\\\" and %d more directories.\\n\",\n                                new_dir, n_new_dir - 1);\n\n    if (n_removed_dir == 1)\n        g_string_append_printf (desc, \"Removed directory \\\"%s\\\".\\n\", removed_dir);\n    else if (n_removed_dir > 1)\n        g_string_append_printf (desc, \"Removed \\\"%s\\\" and %d more directories.\\n\",\n                                removed_dir, n_removed_dir - 1);\n\n    if (n_renamed_dir == 1)\n        g_string_append_printf (desc, \"Renamed directory \\\"%s\\\".\\n\", renamed_dir);\n    else if (n_renamed_dir > 1)\n        g_string_append_printf (desc, \"Renamed \\\"%s\\\" and %d more directories.\\n\",\n                                renamed_dir, n_renamed_dir - 1);\n\n    return g_string_free (desc, FALSE);\n}\n"
  },
  {
    "path": "common/diff-simple.h",
    "content": "#ifndef DIFF_SIMPLE_H\n#define DIFF_SIMPLE_H\n\n#include <glib.h>\n#include \"seafile-session.h\"\n\n#define DIFF_TYPE_WORKTREE              'W' /* diff from index to worktree */\n#define DIFF_TYPE_INDEX                 'I' /* diff from commit to index */\n#define DIFF_TYPE_COMMITS               'C' /* diff between two commits*/\n\n#define DIFF_STATUS_ADDED               'A'\n#define DIFF_STATUS_DELETED             'D'\n#define DIFF_STATUS_MODIFIED\t        'M'\n#define DIFF_STATUS_RENAMED             'R'\n#define DIFF_STATUS_UNMERGED\t\t'U'\n#define DIFF_STATUS_DIR_ADDED           'B'\n#define DIFF_STATUS_DIR_DELETED         'C'\n#define DIFF_STATUS_DIR_RENAMED         'E'\n\nenum {\n    STATUS_UNMERGED_NONE,\n    /* I and others modified the same file differently. */\n    STATUS_UNMERGED_BOTH_CHANGED,\n    /* I and others created the same file with different contents. */\n    STATUS_UNMERGED_BOTH_ADDED,\n    /* I removed a file while others modified it. */\n    STATUS_UNMERGED_I_REMOVED,\n    /* Others removed a file while I modified it. */\n    STATUS_UNMERGED_OTHERS_REMOVED,\n    /* I replace a directory with a file while others modified files under the directory. */\n    STATUS_UNMERGED_DFC_I_ADDED_FILE,\n    /* Others replace a directory with a file while I modified files under the directory. */\n    STATUS_UNMERGED_DFC_OTHERS_ADDED_FILE,\n};\n\ntypedef struct DiffEntry {\n    char type;\n    char status;\n    int unmerge_state;\n    unsigned char sha1[20];     /* used for resolve rename */\n    char *name;\n    char *new_name;             /* only used in rename. 
*/\n    gint64 size;\n    gint64 origin_size;         /* only used in modified */\n} DiffEntry;\n\nDiffEntry *\ndiff_entry_new (char type, char status, unsigned char *sha1, const char *name);\n\nvoid\ndiff_entry_free (DiffEntry *de);\n\n/*\n * @fold_dir_diff: if TRUE, only the top level directory will be included\n *                 in the diff result if a directory with files is added or removed.\n *                 Otherwise all the files in the directory will be recursively\n *                 included in the diff result.\n */\nint\ndiff_commits (SeafCommit *commit1, SeafCommit *commit2, GList **results,\n              gboolean fold_dir_diff);\n\nint\ndiff_commit_roots (const char *store_id, int version,\n                   const char *root1, const char *root2, GList **results,\n                   gboolean fold_dir_diff);\n\nint\ndiff_merge (SeafCommit *merge, GList **results, gboolean fold_dir_diff);\n\nint\ndiff_merge_roots (const char *store_id, int version,\n                  const char *merged_root, const char *p1_root, const char *p2_root,\n                  GList **results, gboolean fold_dir_diff);\n\nvoid\ndiff_resolve_renames (GList **diff_entries);\n\nvoid\ndiff_resolve_empty_dirs (GList **diff_entries);\n\nint \ndiff_unmerged_state(int mask);\n\nchar *\nformat_diff_results(GList *results);\n\nchar *\ndiff_results_to_description (GList *results);\n\ntypedef int (*DiffFileCB) (int n,\n                           const char *basedir,\n                           SeafDirent *files[],\n                           void *data);\n\ntypedef int (*DiffDirCB) (int n,\n                          const char *basedir,\n                          SeafDirent *dirs[],\n                          void *data,\n                          gboolean *recurse);\n\ntypedef struct DiffOptions {\n    char store_id[37];\n    int version;\n\n    DiffFileCB file_cb;\n    DiffDirCB dir_cb;\n    void *data;\n} DiffOptions;\n\nint\ndiff_trees (int n, const char *roots[], DiffOptions 
*opt);\n\n#endif\n"
  },
  {
    "path": "common/fs-mgr.c",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#ifndef _GNU_SOURECE\n#define _GNU_SOURCE\nchar *strcasestr (const char *haystack, const char *needle);\n#undef _GNU_SOURCE\n#endif\n#include \"common.h\"\n\n#include <sys/stat.h>\n#include <fcntl.h>\n#include <dirent.h>\n\n#ifndef WIN32\n    #include <arpa/inet.h>\n#endif\n\n#include <openssl/sha.h>\n#include <searpc-utils.h>\n\n#include \"seafile-session.h\"\n#include \"seafile-error.h\"\n#include \"fs-mgr.h\"\n#include \"block-mgr.h\"\n#include \"utils.h\"\n#include \"seaf-utils.h\"\n#define DEBUG_FLAG SEAFILE_DEBUG_OTHER\n#include \"log.h\"\n#include \"../common/seafile-crypt.h\"\n\n#ifndef SEAFILE_SERVER\n#include \"../daemon/vc-utils.h\"\n#include \"vc-common.h\"\n#endif  /* SEAFILE_SERVER */\n\n#include \"db.h\"\n\n#define SEAF_TMP_EXT \"~\"\n\nstruct _SeafFSManagerPriv {\n    /* GHashTable      *seafile_cache; */\n    GHashTable      *bl_cache;\n};\n\ntypedef struct SeafileOndisk {\n    guint32          type;\n    guint64          file_size;\n    unsigned char    block_ids[0];\n} __attribute__((__packed__)) SeafileOndisk;\n\ntypedef struct DirentOndisk {\n    guint32 mode;\n    char    id[40];\n    guint32 name_len;\n    char    name[0];\n} __attribute__((__packed__)) DirentOndisk;\n\ntypedef struct SeafdirOndisk {\n    guint32 type;\n    char    dirents[0];\n} __attribute__((__packed__)) SeafdirOndisk;\n\n#ifndef SEAFILE_SERVER\nuint32_t\ncalculate_chunk_size (uint64_t total_size);\nstatic int\nwrite_seafile (SeafFSManager *fs_mgr,\n               const char *repo_id, int version,\n               CDCFileDescriptor *cdc,\n               unsigned char *obj_sha1);\n#endif  /* SEAFILE_SERVER */\n\nSeafFSManager *\nseaf_fs_manager_new (SeafileSession *seaf,\n                     const char *seaf_dir)\n{\n    SeafFSManager *mgr = g_new0 (SeafFSManager, 1);\n\n    mgr->seaf = seaf;\n\n    mgr->obj_store = seaf_obj_store_new (seaf, \"fs\");\n    if (!mgr->obj_store) 
{\n        g_free (mgr);\n        return NULL;\n    }\n\n    mgr->priv = g_new0(SeafFSManagerPriv, 1);\n\n    return mgr;\n}\n\nint\nseaf_fs_manager_init (SeafFSManager *mgr)\n{\n    if (seaf_obj_store_init (mgr->obj_store) < 0) {\n        seaf_warning (\"[fs mgr] Failed to init fs object store.\\n\");\n        return -1;\n    }\n\n    return 0;\n}\n\n#ifndef SEAFILE_SERVER\nstatic int\ncheckout_block (const char *repo_id,\n                int version,\n                const char *block_id,\n                int wfd,\n                SeafileCrypt *crypt)\n{\n    SeafBlockManager *block_mgr = seaf->block_mgr;\n    BlockHandle *handle;\n    BlockMetadata *bmd;\n    char *dec_out = NULL;\n    int dec_out_len = -1;\n    char *blk_content = NULL;\n\n    handle = seaf_block_manager_open_block (block_mgr,\n                                            repo_id, version,\n                                            block_id, BLOCK_READ);\n    if (!handle) {\n        seaf_warning (\"Failed to open block %s\\n\", block_id);\n        return -1;\n    }\n\n    /* first stat the block to get its size */\n    bmd = seaf_block_manager_stat_block_by_handle (block_mgr, handle);\n    if (!bmd) {\n        seaf_warning (\"can't stat block %s.\\n\", block_id);\n        goto checkout_blk_error;\n    }\n\n    /* empty file, skip it */\n    if (bmd->size == 0) {\n        seaf_block_manager_close_block (block_mgr, handle);\n        seaf_block_manager_block_handle_free (block_mgr, handle);\n        return 0;\n    }\n\n    blk_content = (char *)malloc (bmd->size * sizeof(char));\n\n    /* read the block to prepare decryption */\n    if (seaf_block_manager_read_block (block_mgr, handle,\n                                       blk_content, bmd->size) != bmd->size) {\n        seaf_warning (\"Error when reading from block %s.\\n\", block_id);\n        goto checkout_blk_error;\n    }\n\n    if (crypt != NULL) {\n\n        /* An encrypted block size must be a multiple of\n           ENCRYPT_BLK_SIZE\n  
      */\n        if (bmd->size % ENCRYPT_BLK_SIZE != 0) {\n            seaf_warning (\"Error: An invalid encrypted block, %s \\n\", block_id);\n            goto checkout_blk_error;\n        }\n\n        /* decrypt the block */\n        int ret = seafile_decrypt (&dec_out,\n                                   &dec_out_len,\n                                   blk_content,\n                                   bmd->size,\n                                   crypt);\n\n        if (ret != 0) {\n            seaf_warning (\"Decryt block %s failed. \\n\", block_id);\n            goto checkout_blk_error;\n        }\n\n        /* write the decrypted content */\n        ret = writen (wfd, dec_out, dec_out_len);\n\n\n        if (ret !=  dec_out_len) {\n            seaf_warning (\"Failed to write the decryted block %s.\\n\",\n                       block_id);\n            goto checkout_blk_error;\n        }\n\n        g_free (blk_content);\n        g_free (dec_out);\n\n    } else {\n        /* not an encrypted block */\n        if (writen(wfd, blk_content, bmd->size) != bmd->size) {\n            seaf_warning (\"Failed to write the decryted block %s.\\n\",\n                       block_id);\n            goto checkout_blk_error;\n        }\n        g_free (blk_content);\n    }\n\n    g_free (bmd);\n    seaf_block_manager_close_block (block_mgr, handle);\n    seaf_block_manager_block_handle_free (block_mgr, handle);\n    return 0;\n\ncheckout_blk_error:\n\n    if (blk_content)\n        free (blk_content);\n    if (dec_out)\n        g_free (dec_out);\n    if (bmd)\n        g_free (bmd);\n\n    seaf_block_manager_close_block (block_mgr, handle);\n    seaf_block_manager_block_handle_free (block_mgr, handle);\n    return -1;\n}\n\nint\nseaf_fs_manager_checkout_file (SeafFSManager *mgr,\n                               const char *repo_id,\n                               int version,\n                               const char *file_id,\n                               const char 
*file_path,\n                               guint32 mode,\n                               guint64 mtime,\n                               SeafileCrypt *crypt,\n                               const char *in_repo_path,\n                               const char *conflict_head_id,\n                               gboolean force_conflict,\n                               gboolean *conflicted,\n                               const char *email)\n{\n    Seafile *seafile;\n    char *blk_id;\n    int wfd;\n    int i;\n    char *tmp_path;\n    char *conflict_path;\n\n    *conflicted = FALSE;\n\n    seafile = seaf_fs_manager_get_seafile (mgr, repo_id, version, file_id);\n    if (!seafile) {\n        seaf_warning (\"File %s does not exist.\\n\", file_id);\n        return -1;\n    }\n\n    tmp_path = g_strconcat (file_path, SEAF_TMP_EXT, NULL);\n\n    mode_t rmode = mode & 0100 ? 0777 : 0666;\n    wfd = seaf_util_create (tmp_path, O_WRONLY | O_TRUNC | O_CREAT | O_BINARY,\n                            rmode & ~S_IFMT);\n    if (wfd < 0) {\n        seaf_warning (\"Failed to open file %s for checkout: %s.\\n\",\n                   tmp_path, strerror(errno));\n        goto bad;\n    }\n\n    for (i = 0; i < seafile->n_blocks; ++i) {\n        blk_id = seafile->blk_sha1s[i];\n        if (checkout_block (repo_id, version, blk_id, wfd, crypt) < 0)\n            goto bad;\n    }\n\n    close (wfd);\n    wfd = -1;\n\n    if (force_conflict || seaf_util_rename (tmp_path, file_path) < 0) {\n        *conflicted = TRUE;\n\n        /* XXX\n         * In new syncing protocol and http sync, files are checked out before\n         * the repo is created. So we can't get user email from repo at this point.\n         * So a email parameter is needed.\n         * For old syncing protocol, repo always exists when files are checked out.\n         * This is a quick and dirty hack. A cleaner solution should modifiy the\n         * code of old syncing protocol to pass in email too. 
But I don't want to\n         * spend more time on the nearly obsoleted code.\n         */\n        const char *suffix = NULL;\n        if (email) {\n            suffix = email;\n        } else {\n            SeafRepo *repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);\n            if (!repo)\n                goto bad;\n            suffix = email;\n        }\n\n        conflict_path = gen_conflict_path (file_path, suffix, (gint64)time(NULL));\n\n        seaf_warning (\"Cannot update %s, creating conflict file %s.\\n\",\n                      file_path, conflict_path);\n\n        /* First try to rename the local version to a conflict file,\n         * this will preserve the version from the server.\n         * If this fails, fall back to checking out the server version\n         * to the conflict file.\n         */\n        if (seaf_util_rename (file_path, conflict_path) == 0) {\n            if (seaf_util_rename (tmp_path, file_path) < 0) {\n                g_free (conflict_path);\n                goto bad;\n            }\n        } else {\n            g_free (conflict_path);\n            conflict_path = gen_conflict_path_wrapper (repo_id, version,\n                                                       conflict_head_id, in_repo_path,\n                                                       file_path);\n            if (!conflict_path)\n                goto bad;\n\n            if (seaf_util_rename (tmp_path, conflict_path) < 0) {\n                g_free (conflict_path);\n                goto bad;\n            }\n        }\n\n        g_free (conflict_path);\n    }\n\n    if (mtime > 0) {\n        /* \n         * Set the checked out file mtime to what it has to be.\n         */\n        if (seaf_set_file_time (file_path, mtime) < 0) {\n            seaf_warning (\"Failed to set mtime for %s.\\n\", file_path);\n        }\n    }\n\n    g_free (tmp_path);\n    seafile_unref (seafile);\n    return 0;\n\nbad:\n    if (wfd >= 0)\n        close (wfd);\n    /* Remove 
the tmp file if it still exists, in case that rename fails. */\n    seaf_util_unlink (tmp_path);\n    g_free (tmp_path);\n    seafile_unref (seafile);\n    return -1;\n}\n\n#endif /* SEAFILE_SERVER */\n\nstatic void *\ncreate_seafile_v0 (CDCFileDescriptor *cdc, int *ondisk_size, char *seafile_id)\n{\n    SeafileOndisk *ondisk;\n\n    rawdata_to_hex (cdc->file_sum, seafile_id, 20);\n\n    *ondisk_size = sizeof(SeafileOndisk) + cdc->block_nr * 20;\n    ondisk = (SeafileOndisk *)g_new0 (char, *ondisk_size);\n\n    ondisk->type = htonl(SEAF_METADATA_TYPE_FILE);\n    ondisk->file_size = hton64 (cdc->file_size);\n    memcpy (ondisk->block_ids, cdc->blk_sha1s, cdc->block_nr * 20);\n\n    return ondisk;\n}\n\nstatic void *\ncreate_seafile_json (int repo_version,\n                     CDCFileDescriptor *cdc,\n                     int *ondisk_size,\n                     char *seafile_id)\n{\n    json_t *object, *block_id_array;\n\n    object = json_object ();\n\n    json_object_set_int_member (object, \"type\", SEAF_METADATA_TYPE_FILE);\n    json_object_set_int_member (object, \"version\",\n                                seafile_version_from_repo_version(repo_version));\n\n    json_object_set_int_member (object, \"size\", cdc->file_size);\n\n    block_id_array = json_array ();\n    int i;\n    uint8_t *ptr = cdc->blk_sha1s;\n    char block_id[41];\n    for (i = 0; i < cdc->block_nr; ++i) {\n        rawdata_to_hex (ptr, block_id, 20);\n        json_array_append_new (block_id_array, json_string(block_id));\n        ptr += 20;\n    }\n    json_object_set_new (object, \"block_ids\", block_id_array);\n\n    char *data = json_dumps (object, JSON_SORT_KEYS);\n    *ondisk_size = strlen(data);\n\n    /* The seafile object id is sha1 hash of the json object. 
*/\n    unsigned char sha1[20];\n    calculate_sha1 (sha1, data, *ondisk_size);\n    rawdata_to_hex (sha1, seafile_id, 20);\n\n    json_decref (object);\n    return data;\n}\n\nvoid\nseaf_fs_manager_calculate_seafile_id_json (int repo_version,\n                                           CDCFileDescriptor *cdc,\n                                           guint8 *file_id_sha1)\n{\n    json_t *object, *block_id_array;\n\n    object = json_object ();\n\n    json_object_set_int_member (object, \"type\", SEAF_METADATA_TYPE_FILE);\n    json_object_set_int_member (object, \"version\",\n                                seafile_version_from_repo_version(repo_version));\n\n    json_object_set_int_member (object, \"size\", cdc->file_size);\n\n    block_id_array = json_array ();\n    int i;\n    uint8_t *ptr = cdc->blk_sha1s;\n    char block_id[41];\n    for (i = 0; i < cdc->block_nr; ++i) {\n        rawdata_to_hex (ptr, block_id, 20);\n        json_array_append_new (block_id_array, json_string(block_id));\n        ptr += 20;\n    }\n    json_object_set_new (object, \"block_ids\", block_id_array);\n\n    char *data = json_dumps (object, JSON_SORT_KEYS);\n    int ondisk_size = strlen(data);\n\n    /* The seafile object id is sha1 hash of the json object. 
*/\n    calculate_sha1 (file_id_sha1, data, ondisk_size);\n\n    json_decref (object);\n    free (data);\n}\n\nstatic int\nwrite_seafile (SeafFSManager *fs_mgr,\n               const char *repo_id,\n               int version,\n               CDCFileDescriptor *cdc,\n               unsigned char *obj_sha1)\n{\n    int ret = 0;\n    char seafile_id[41];\n    void *ondisk;\n    int ondisk_size;\n\n    if (version > 0) {\n        ondisk = create_seafile_json (version, cdc, &ondisk_size, seafile_id);\n\n        guint8 *compressed;\n        int outlen;\n\n        if (seaf_obj_store_obj_exists (fs_mgr->obj_store, repo_id, version, seafile_id)) {\n            ret = 0;\n            free (ondisk);\n            goto out;\n        }\n\n        if (seaf_compress (ondisk, ondisk_size, &compressed, &outlen) < 0) {\n            seaf_warning (\"Failed to compress seafile obj %s:%s.\\n\",\n                          repo_id, seafile_id);\n            ret = -1;\n            free (ondisk);\n            goto out;\n        }\n\n        if (seaf_obj_store_write_obj (fs_mgr->obj_store, repo_id, version, seafile_id,\n                                      compressed, outlen, FALSE) < 0)\n            ret = -1;\n        g_free (compressed);\n        free (ondisk);\n    } else {\n        ondisk = create_seafile_v0 (cdc, &ondisk_size, seafile_id);\n        if (seaf_obj_store_obj_exists (fs_mgr->obj_store, repo_id, version, seafile_id)) {\n            ret = 0;\n            g_free (ondisk);\n            goto out;\n        }\n\n        if (seaf_obj_store_write_obj (fs_mgr->obj_store, repo_id, version, seafile_id,\n                                      ondisk, ondisk_size, FALSE) < 0)\n            ret = -1;\n        g_free (ondisk);\n    }\n\nout:\n    if (ret == 0)\n        hex_to_rawdata (seafile_id, obj_sha1, 20);\n\n    return ret;\n}\n\nuint32_t\ncalculate_chunk_size (uint64_t total_size)\n{\n    const uint64_t GiB = 1073741824;\n    const uint64_t MiB = 1048576;\n\n    if (total_size >= (8 * 
GiB)) return 8 * MiB;\n    if (total_size >= (4 * GiB)) return 4 * MiB;\n    if (total_size >= (2 * GiB)) return 2 * MiB;\n\n    return 1 * MiB;\n}\n\nstatic int\ndo_write_chunk (const char *repo_id, int version,\n                uint8_t *checksum, const char *buf, int len)\n{\n    SeafBlockManager *blk_mgr = seaf->block_mgr;\n    char chksum_str[41];\n    BlockHandle *handle;\n    int n;\n\n    rawdata_to_hex (checksum, chksum_str, 20);\n\n    /* Don't write if the block already exists. */\n    if (seaf_block_manager_block_exists (seaf->block_mgr,\n                                         repo_id, version,\n                                         chksum_str))\n        return 0;\n\n    handle = seaf_block_manager_open_block (blk_mgr,\n                                            repo_id, version,\n                                            chksum_str, BLOCK_WRITE);\n    if (!handle) {\n        seaf_warning (\"Failed to open block %s.\\n\", chksum_str);\n        return -1;\n    }\n\n    n = seaf_block_manager_write_block (blk_mgr, handle, buf, len);\n    if (n < 0) {\n        seaf_warning (\"Failed to write chunk %s.\\n\", chksum_str);\n        seaf_block_manager_close_block (blk_mgr, handle);\n        seaf_block_manager_block_handle_free (blk_mgr, handle);\n        return -1;\n    }\n\n    if (seaf_block_manager_close_block (blk_mgr, handle) < 0) {\n        seaf_warning (\"failed to close block %s.\\n\", chksum_str);\n        seaf_block_manager_block_handle_free (blk_mgr, handle);\n        return -1;\n    }\n\n    if (seaf_block_manager_commit_block (blk_mgr, handle) < 0) {\n        seaf_warning (\"failed to commit chunk %s.\\n\", chksum_str);\n        seaf_block_manager_block_handle_free (blk_mgr, handle);\n        return -1;\n    }\n\n    seaf_block_manager_block_handle_free (blk_mgr, handle);\n    return 0;\n}\n\n/* write the chunk and store its checksum */\nint\nseafile_write_chunk (const char *repo_id,\n                     int version,\n                     
CDCDescriptor *chunk,\n                     SeafileCrypt *crypt,\n                     uint8_t *checksum,\n                     gboolean write_data)\n{\n    SHA_CTX ctx;\n    int ret = 0;\n\n    /* Encrypt before write to disk if needed, and we don't encrypt\n     * empty files. */\n    if (crypt != NULL && chunk->len) {\n        char *encrypted_buf = NULL;         /* encrypted output */\n        int enc_len = -1;                /* encrypted length */\n\n        ret = seafile_encrypt (&encrypted_buf, /* output */\n                               &enc_len,      /* output len */\n                               chunk->block_buf, /* input */\n                               chunk->len,       /* input len */\n                               crypt);\n        if (ret != 0) {\n            seaf_warning (\"Error: failed to encrypt block\\n\");\n            return -1;\n        }\n\n        SHA1_Init (&ctx);\n        SHA1_Update (&ctx, encrypted_buf, enc_len);\n        SHA1_Final (checksum, &ctx);\n\n        if (write_data)\n            ret = do_write_chunk (repo_id, version, checksum, encrypted_buf, enc_len);\n        g_free (encrypted_buf);\n    } else {\n        /* not a encrypted repo, go ahead */\n        SHA1_Init (&ctx);\n        SHA1_Update (&ctx, chunk->block_buf, chunk->len);\n        SHA1_Final (checksum, &ctx);\n\n        if (write_data)\n            ret = do_write_chunk (repo_id, version, checksum, chunk->block_buf, chunk->len);\n    }\n\n    return ret;\n}\n\nstatic void\ncreate_cdc_for_empty_file (CDCFileDescriptor *cdc)\n{\n    memset (cdc, 0, sizeof(CDCFileDescriptor));\n}\n\n#if defined SEAFILE_SERVER && defined FULL_FEATURE\n\n#define FIXED_BLOCK_SIZE (1<<20)\n\ntypedef struct ChunkingData {\n    const char *repo_id;\n    int version;\n    const char *file_path;\n    SeafileCrypt *crypt;\n    guint8 *blk_sha1s;\n    GAsyncQueue *finished_tasks;\n} ChunkingData;\n\nstatic void\nchunking_worker (gpointer vdata, gpointer user_data)\n{\n    ChunkingData *data = 
user_data;\n    CDCDescriptor *chunk = vdata;\n    int fd = -1;\n    ssize_t n;\n    int idx;\n\n    chunk->block_buf = g_new0 (char, chunk->len);\n    if (!chunk->block_buf) {\n        seaf_warning (\"Failed to allow chunk buffer\\n\");\n        goto out;\n    }\n\n    fd = seaf_util_open (data->file_path, O_RDONLY | O_BINARY);\n    if (fd < 0) {\n        seaf_warning (\"Failed to open %s: %s\\n\", data->file_path, strerror(errno));\n        chunk->result = -1;\n        goto out;\n    }\n\n    if (seaf_util_lseek (fd, chunk->offset, SEEK_SET) == (gint64)-1) {\n        seaf_warning (\"Failed to lseek %s: %s\\n\", data->file_path, strerror(errno));\n        chunk->result = -1;\n        goto out;\n    }\n\n    n = readn (fd, chunk->block_buf, chunk->len);\n    if (n < 0) {\n        seaf_warning (\"Failed to read chunk from %s: %s\\n\",\n                      data->file_path, strerror(errno));\n        chunk->result = -1;\n        goto out;\n    }\n\n    chunk->result = seafile_write_chunk (data->repo_id, data->version,\n                                         chunk, data->crypt,\n                                         chunk->checksum, 1);\n    if (chunk->result < 0)\n        goto out;\n\n    idx = chunk->offset / seaf->fixed_block_size;\n    memcpy (data->blk_sha1s + idx * CHECKSUM_LENGTH, chunk->checksum, CHECKSUM_LENGTH);\n\nout:\n    g_free (chunk->block_buf);\n    close (fd);\n    g_async_queue_push (data->finished_tasks, chunk);\n}\n\nstatic int\nsplit_file_to_block (const char *repo_id,\n                     int version,\n                     const char *file_path,\n                     gint64 file_size,\n                     SeafileCrypt *crypt,\n                     CDCFileDescriptor *cdc,\n                     gboolean write_data,\n                     gint64 *indexed)\n{\n    int n_blocks;\n    uint8_t *block_sha1s = NULL;\n    GThreadPool *tpool = NULL;\n    GAsyncQueue *finished_tasks = NULL;\n    GList *pending_tasks = NULL;\n    int n_pending = 0;\n  
  CDCDescriptor *chunk;\n    int ret = 0;\n\n    n_blocks = (file_size + seaf->fixed_block_size - 1) / seaf->fixed_block_size;\n    block_sha1s = g_new0 (uint8_t, n_blocks * CHECKSUM_LENGTH);\n    if (!block_sha1s) {\n        seaf_warning (\"Failed to allocate block_sha1s.\\n\");\n        ret = -1;\n        goto out;\n    }\n\n    finished_tasks = g_async_queue_new ();\n\n    ChunkingData data;\n    memset (&data, 0, sizeof(data));\n    data.repo_id = repo_id;\n    data.version = version;\n    data.file_path = file_path;\n    data.crypt = crypt;\n    data.blk_sha1s = block_sha1s;\n    data.finished_tasks = finished_tasks;\n\n    tpool = g_thread_pool_new (chunking_worker, &data,\n                               seaf->max_indexing_threads, FALSE, NULL);\n    if (!tpool) {\n        seaf_warning (\"Failed to allocate thread pool\\n\");\n        ret = -1;\n        goto out;\n    }\n\n    guint64 offset = 0;\n    guint64 len;\n    guint64 left = (guint64)file_size;\n    while (left > 0) {\n        len = ((left >= seaf->fixed_block_size) ? 
seaf->fixed_block_size : left);\n\n        chunk = g_new0 (CDCDescriptor, 1);\n        chunk->offset = offset;\n        chunk->len = (guint32)len;\n\n        g_thread_pool_push (tpool, chunk, NULL);\n        pending_tasks = g_list_prepend (pending_tasks, chunk);\n        n_pending++;\n\n        left -= len;\n        offset += len;\n    }\n\n    while ((chunk = g_async_queue_pop (finished_tasks)) != NULL) {\n        if (chunk->result < 0) {\n            ret = -1;\n            goto out;\n        }\n        if (indexed)\n            *indexed += seaf->fixed_block_size;\n\n        if ((--n_pending) <= 0) {\n            if (indexed)\n                *indexed = (guint64)file_size;\n            break;\n        }\n    }\n\n    cdc->block_nr = n_blocks;\n    cdc->blk_sha1s = block_sha1s;\n\nout:\n    if (tpool)\n        g_thread_pool_free (tpool, TRUE, TRUE);\n    if (finished_tasks)\n        g_async_queue_unref (finished_tasks);\n    g_list_free_full (pending_tasks, g_free);\n    if (ret < 0)\n        g_free (block_sha1s);\n\n    return ret;\n}\n\n#endif  /* SEAFILE_SERVER */\n\n#define CDC_AVERAGE_BLOCK_SIZE (1 << 23) /* 8MB */\n#define CDC_MIN_BLOCK_SIZE (6 * (1 << 20)) /* 6MB */\n#define CDC_MAX_BLOCK_SIZE (10 * (1 << 20)) /* 10MB */\n\nint\nseaf_fs_manager_index_blocks (SeafFSManager *mgr,\n                              const char *repo_id,\n                              int version,\n                              const char *file_path,\n                              unsigned char sha1[],\n                              gint64 *size,\n                              SeafileCrypt *crypt,\n                              gboolean write_data,\n                              gboolean use_cdc,\n                              gint64 *indexed)\n{\n    SeafStat sb;\n    CDCFileDescriptor cdc;\n\n    if (seaf_stat (file_path, &sb) < 0) {\n        seaf_warning (\"Bad file %s: %s.\\n\", file_path, strerror(errno));\n        return -1;\n    }\n\n    g_return_val_if_fail 
(S_ISREG(sb.st_mode), -1);\n\n    if (sb.st_size == 0) {\n        /* handle empty file. */\n        memset (sha1, 0, 20);\n        create_cdc_for_empty_file (&cdc);\n    } else {\n        memset (&cdc, 0, sizeof(cdc));\n#if defined SEAFILE_SERVER && defined FULL_FEATURE\n        if (use_cdc || version == 0) {\n            cdc.block_sz = CDC_AVERAGE_BLOCK_SIZE;\n            cdc.block_min_sz = CDC_MIN_BLOCK_SIZE;\n            cdc.block_max_sz = CDC_MAX_BLOCK_SIZE;\n            cdc.write_block = seafile_write_chunk;\n            memcpy (cdc.repo_id, repo_id, 36);\n            cdc.version = version;\n            if (filename_chunk_cdc (file_path, &cdc, crypt, write_data, indexed) < 0) {\n                seaf_warning (\"Failed to chunk file with CDC.\\n\");\n                return -1;\n            }\n        } else {\n            memcpy (cdc.repo_id, repo_id, 36);\n            cdc.version = version;\n            cdc.file_size = sb.st_size;\n            if (split_file_to_block (repo_id, version, file_path, sb.st_size,\n                                     crypt, &cdc, write_data, indexed) < 0) {\n                return -1;\n            }\n        }\n#else\n        cdc.block_sz = CDC_AVERAGE_BLOCK_SIZE;\n        cdc.block_min_sz = CDC_MIN_BLOCK_SIZE;\n        cdc.block_max_sz = CDC_MAX_BLOCK_SIZE;\n        cdc.write_block = seafile_write_chunk;\n        memcpy (cdc.repo_id, repo_id, 36);\n        cdc.version = version;\n        if (filename_chunk_cdc (file_path, &cdc, crypt, write_data, indexed) < 0) {\n            seaf_warning (\"Failed to chunk file with CDC.\\n\");\n            return -1;\n        }\n#endif\n\n        if (write_data && write_seafile (mgr, repo_id, version, &cdc, sha1) < 0) {\n            g_free (cdc.blk_sha1s);\n            seaf_warning (\"Failed to write seafile for %s.\\n\", file_path);\n            return -1;\n        }\n    }\n\n    *size = (gint64)sb.st_size;\n\n    if (cdc.blk_sha1s)\n        free (cdc.blk_sha1s);\n\n    return 0;\n}\n\nstatic 
int\ncheck_and_write_block (const char *repo_id, int version,\n                       const char *path, unsigned char *sha1, const char *block_id)\n{\n    char *content;\n    gsize len;\n    GError *error = NULL;\n    int ret = 0;\n\n    if (!g_file_get_contents (path, &content, &len, &error)) {\n        if (error) {\n            seaf_warning (\"Failed to read %s: %s.\\n\", path, error->message);\n            g_clear_error (&error);\n            return -1;\n        }\n    }\n\n    SHA_CTX block_ctx;\n    unsigned char checksum[20];\n\n    SHA1_Init (&block_ctx);\n    SHA1_Update (&block_ctx, content, len);\n    SHA1_Final (checksum, &block_ctx);\n\n    if (memcmp (checksum, sha1, 20) != 0) {\n        seaf_warning (\"Block id %s:%s doesn't match content.\\n\", repo_id, block_id);\n        ret = -1;\n        goto out;\n    }\n\n    if (do_write_chunk (repo_id, version, sha1, content, len) < 0) {\n        ret = -1;\n        goto out;\n    }\n\nout:\n    g_free (content);\n    return ret;\n}\n\nstatic int\ncheck_and_write_file_blocks (CDCFileDescriptor *cdc, GList *paths, GList *blockids)\n{\n    GList *ptr, *q;\n    SHA_CTX file_ctx;\n    int ret = 0;\n\n    SHA1_Init (&file_ctx);\n    for (ptr = paths, q = blockids; ptr; ptr = ptr->next, q = q->next) {\n        char *path = ptr->data;\n        char *blk_id = q->data;\n        unsigned char sha1[20];\n\n        hex_to_rawdata (blk_id, sha1, 20);\n        ret = check_and_write_block (cdc->repo_id, cdc->version, path, sha1, blk_id);\n        if (ret < 0)\n            goto out;\n\n        memcpy (cdc->blk_sha1s + cdc->block_nr * CHECKSUM_LENGTH,\n                sha1, CHECKSUM_LENGTH);\n        cdc->block_nr++;\n\n        SHA1_Update (&file_ctx, sha1, 20);\n    }\n\n    SHA1_Final (cdc->file_sum, &file_ctx);\n\nout:\n    return ret;\n}\n\nstatic int\ncheck_existed_file_blocks (CDCFileDescriptor *cdc, GList *blockids)\n{\n    GList *q;\n    SHA_CTX file_ctx;\n    int ret = 0;\n\n    SHA1_Init (&file_ctx);\n    for (q = 
blockids; q; q = q->next) {\n        char *blk_id = q->data;\n        unsigned char sha1[20];\n\n        if (!seaf_block_manager_block_exists (\n                seaf->block_mgr, cdc->repo_id, cdc->version, blk_id)) {\n            ret = -1;\n            goto out;\n        }\n\n        hex_to_rawdata (blk_id, sha1, 20);\n        memcpy (cdc->blk_sha1s + cdc->block_nr * CHECKSUM_LENGTH,\n                sha1, CHECKSUM_LENGTH);\n        cdc->block_nr++;\n\n        SHA1_Update (&file_ctx, sha1, 20);\n    }\n\n    SHA1_Final (cdc->file_sum, &file_ctx);\n\nout:\n    return ret;\n}\n\nstatic int\ninit_file_cdc (CDCFileDescriptor *cdc,\n               const char *repo_id, int version,\n               int block_nr, gint64 file_size)\n{\n    memset (cdc, 0, sizeof(CDCFileDescriptor));\n\n    cdc->file_size = file_size;\n\n    cdc->blk_sha1s =  (uint8_t *)calloc (sizeof(uint8_t), block_nr * CHECKSUM_LENGTH);\n    if (!cdc->blk_sha1s) {\n        seaf_warning (\"Failed to alloc block sha1 array.\\n\");\n        return -1;\n    }\n\n    memcpy (cdc->repo_id, repo_id, 36);\n    cdc->version = version;\n\n    return 0;\n}\n\nint\nseaf_fs_manager_index_file_blocks (SeafFSManager *mgr,\n                                   const char *repo_id,\n                                   int version,\n                                   GList *paths,\n                                   GList *blockids,\n                                   unsigned char sha1[],\n                                   gint64 file_size)\n{\n    int ret = 0;\n    CDCFileDescriptor cdc;\n\n    if (!paths) {\n        /* handle empty file. 
*/\n        memset (sha1, 0, 20);\n        create_cdc_for_empty_file (&cdc);\n    } else {\n        int block_nr = g_list_length (paths);\n\n        if (init_file_cdc (&cdc, repo_id, version, block_nr, file_size) < 0) {\n            ret = -1;\n            goto out;\n        }\n\n        if (check_and_write_file_blocks (&cdc, paths, blockids) < 0) {\n            seaf_warning (\"Failed to check and write file blocks.\\n\");\n            ret = -1;\n            goto out;\n        }\n\n        if (write_seafile (mgr, repo_id, version, &cdc, sha1) < 0) {\n            seaf_warning (\"Failed to write seafile.\\n\");\n            ret = -1;\n            goto out;\n        }\n    }\n\nout:\n    if (cdc.blk_sha1s)\n        free (cdc.blk_sha1s);\n\n    return ret;\n}\n\nint\nseaf_fs_manager_index_raw_blocks (SeafFSManager *mgr,\n                                  const char *repo_id,\n                                  int version,\n                                  GList *paths,\n                                  GList *blockids)\n{\n    int ret = 0;\n    GList *ptr, *q;\n\n    if (!paths)\n        return -1;\n\n    for (ptr = paths, q = blockids; ptr; ptr = ptr->next, q = q->next) {\n        char *path = ptr->data;\n        char *blk_id = q->data;\n        unsigned char sha1[20];\n\n        hex_to_rawdata (blk_id, sha1, 20);\n        ret = check_and_write_block (repo_id, version, path, sha1, blk_id);\n        if (ret < 0)\n            break;\n\n    }\n\n    return ret;\n}\n\nint\nseaf_fs_manager_index_existed_file_blocks (SeafFSManager *mgr,\n                                           const char *repo_id,\n                                           int version,\n                                           GList *blockids,\n                                           unsigned char sha1[],\n                                           gint64 file_size)\n{\n    int ret = 0;\n    CDCFileDescriptor cdc;\n\n    int block_nr = g_list_length (blockids);\n    if (block_nr == 0) {\n        
/* handle empty file. */\n        memset (sha1, 0, 20);\n        create_cdc_for_empty_file (&cdc);\n    } else {\n        if (init_file_cdc (&cdc, repo_id, version, block_nr, file_size) < 0) {\n            ret = -1;\n            goto out;\n        }\n\n        if (check_existed_file_blocks (&cdc, blockids) < 0) {\n            seaf_warning (\"Failed to check and write file blocks.\\n\");\n            ret = -1;\n            goto out;\n        }\n\n        if (write_seafile (mgr, repo_id, version, &cdc, sha1) < 0) {\n            seaf_warning (\"Failed to write seafile.\\n\");\n            ret = -1;\n            goto out;\n        }\n    }\n\nout:\n    if (cdc.blk_sha1s)\n        free (cdc.blk_sha1s);\n\n    return ret;\n}\n\nvoid\nseafile_ref (Seafile *seafile)\n{\n    ++seafile->ref_count;\n}\n\nstatic void\nseafile_free (Seafile *seafile)\n{\n    int i;\n\n    if (seafile->blk_sha1s) {\n        for (i = 0; i < seafile->n_blocks; ++i)\n            g_free (seafile->blk_sha1s[i]);\n        g_free (seafile->blk_sha1s);\n    }\n\n    g_free (seafile);\n}\n\nvoid\nseafile_unref (Seafile *seafile)\n{\n    if (!seafile)\n        return;\n\n    if (--seafile->ref_count <= 0)\n        seafile_free (seafile);\n}\n\nstatic Seafile *\nseafile_from_v0_data (const char *id, const void *data, int len)\n{\n    const SeafileOndisk *ondisk = data;\n    Seafile *seafile;\n    int id_list_len, n_blocks;\n\n    if (len < sizeof(SeafileOndisk)) {\n        seaf_warning (\"[fs mgr] Corrupt seafile object %s.\\n\", id);\n        return NULL;\n    }\n\n    if (ntohl(ondisk->type) != SEAF_METADATA_TYPE_FILE) {\n        seaf_warning (\"[fd mgr] %s is not a file.\\n\", id);\n        return NULL;\n    }\n\n    id_list_len = len - sizeof(SeafileOndisk);\n    if (id_list_len % 20 != 0) {\n        seaf_warning (\"[fs mgr] Corrupt seafile object %s.\\n\", id);\n        return NULL;\n    }\n    n_blocks = id_list_len / 20;\n\n    seafile = g_new0 (Seafile, 1);\n\n    seafile->object.type = 
SEAF_METADATA_TYPE_FILE;\n    seafile->version = 0;\n    memcpy (seafile->file_id, id, 41);\n    seafile->file_size = ntoh64 (ondisk->file_size);\n    seafile->n_blocks = n_blocks;\n\n    seafile->blk_sha1s = g_new0 (char*, seafile->n_blocks);\n    const unsigned char *blk_sha1_ptr = ondisk->block_ids;\n    int i;\n    for (i = 0; i < seafile->n_blocks; ++i) {\n        char *blk_sha1 = g_new0 (char, 41);\n        seafile->blk_sha1s[i] = blk_sha1;\n        rawdata_to_hex (blk_sha1_ptr, blk_sha1, 20);\n        blk_sha1_ptr += 20;\n    }\n\n    seafile->ref_count = 1;\n    return seafile;\n}\n\nstatic Seafile *\nseafile_from_json_object (const char *id, json_t *object)\n{\n    json_t *block_id_array = NULL;\n    int type;\n    int version;\n    guint64 file_size;\n    Seafile *seafile = NULL;\n\n    /* Sanity checks. */\n    type = json_object_get_int_member (object, \"type\");\n    if (type != SEAF_METADATA_TYPE_FILE) {\n        seaf_debug (\"Object %s is not a file.\\n\", id);\n        return NULL;\n    }\n\n    version = (int) json_object_get_int_member (object, \"version\");\n    if (version < 1) {\n        seaf_debug (\"Seafile object %s version should be > 0, version is %d.\\n\",\n                    id, version);\n        return NULL;\n    }\n\n    file_size = (guint64) json_object_get_int_member (object, \"size\");\n\n    block_id_array = json_object_get (object, \"block_ids\");\n    if (!block_id_array) {\n        seaf_debug (\"No block id array in seafile object %s.\\n\", id);\n        return NULL;\n    }\n\n    seafile = g_new0 (Seafile, 1);\n\n    seafile->object.type = SEAF_METADATA_TYPE_FILE;\n\n    memcpy (seafile->file_id, id, 40);\n    seafile->version = version;\n    seafile->file_size = file_size;\n    seafile->n_blocks = json_array_size (block_id_array);\n    seafile->blk_sha1s = g_new0 (char *, seafile->n_blocks);\n\n    int i;\n    json_t *block_id_obj;\n    const char *block_id;\n    for (i = 0; i < seafile->n_blocks; ++i) {\n        
block_id_obj = json_array_get (block_id_array, i);\n        block_id = json_string_value (block_id_obj);\n        if (!block_id || !is_object_id_valid(block_id)) {\n            seafile_free (seafile);\n            return NULL;\n        }\n        seafile->blk_sha1s[i] = g_strdup(block_id);\n    }\n\n    seafile->ref_count = 1;\n\n    return seafile;\n}\n\nstatic Seafile *\nseafile_from_json (const char *id, void *data, int len)\n{\n    guint8 *decompressed;\n    int outlen;\n    json_t *object = NULL;\n    json_error_t error;\n    Seafile *seafile;\n\n    if (seaf_decompress (data, len, &decompressed, &outlen) < 0) {\n        seaf_warning (\"Failed to decompress seafile object %s.\\n\", id);\n        return NULL;\n    }\n\n    object = json_loadb ((const char *)decompressed, outlen, 0, &error);\n    g_free (decompressed);\n    if (!object) {\n        if (error.text)\n            seaf_warning (\"Failed to load seafile json object: %s.\\n\", error.text);\n        else\n            seaf_warning (\"Failed to load seafile json object.\\n\");\n        return NULL;\n    }\n\n    seafile = seafile_from_json_object (id, object);\n\n    json_decref (object);\n    return seafile;\n}\n\nstatic Seafile *\nseafile_from_data (const char *id, void *data, int len, gboolean is_json)\n{\n    if (is_json)\n        return seafile_from_json (id, data, len);\n    else\n        return seafile_from_v0_data (id, data, len);\n}\n\nSeafile *\nseaf_fs_manager_get_seafile (SeafFSManager *mgr,\n                             const char *repo_id,\n                             int version,\n                             const char *file_id)\n{\n    void *data;\n    int len;\n    Seafile *seafile;\n\n#if 0\n    seafile = g_hash_table_lookup (mgr->priv->seafile_cache, file_id);\n    if (seafile) {\n        seafile_ref (seafile);\n        return seafile;\n    }\n#endif\n\n    if (memcmp (file_id, EMPTY_SHA1, 40) == 0) {\n        seafile = g_new0 (Seafile, 1);\n        memset (seafile->file_id, '0', 
40);\n        seafile->ref_count = 1;\n        return seafile;\n    }\n\n    if (seaf_obj_store_read_obj (mgr->obj_store, repo_id, version,\n                                 file_id, &data, &len) < 0) {\n        seaf_warning (\"[fs mgr] Failed to read file %s.\\n\", file_id);\n        return NULL;\n    }\n\n    seafile = seafile_from_data (file_id, data, len, (version > 0));\n    g_free (data);\n\n#if 0\n    /*\n     * Add to cache. Also increase ref count.\n     */\n    seafile_ref (seafile);\n    g_hash_table_insert (mgr->priv->seafile_cache, g_strdup(file_id), seafile);\n#endif\n\n    return seafile;\n}\n\nstatic guint8 *\nseafile_to_v0_data (Seafile *file, int *len)\n{\n    SeafileOndisk *ondisk;\n\n    *len = sizeof(SeafileOndisk) + file->n_blocks * 20;\n    ondisk = (SeafileOndisk *)g_new0 (char, *len);\n\n    ondisk->type = htonl(SEAF_METADATA_TYPE_FILE);\n    ondisk->file_size = hton64 (file->file_size);\n\n    guint8 *ptr = ondisk->block_ids;\n    int i;\n    for (i = 0; i < file->n_blocks; ++i) {\n        hex_to_rawdata (file->blk_sha1s[i], ptr, 20);\n        ptr += 20;\n    }\n\n    return (guint8 *)ondisk;\n}\n\nstatic guint8 *\nseafile_to_json (Seafile *file, int *len)\n{\n    json_t *object, *block_id_array;\n\n    object = json_object ();\n\n    json_object_set_int_member (object, \"type\", SEAF_METADATA_TYPE_FILE);\n    json_object_set_int_member (object, \"version\", file->version);\n\n    json_object_set_int_member (object, \"size\", file->file_size);\n\n    block_id_array = json_array ();\n    int i;\n    for (i = 0; i < file->n_blocks; ++i) {\n        json_array_append_new (block_id_array, json_string(file->blk_sha1s[i]));\n    }\n    json_object_set_new (object, \"block_ids\", block_id_array);\n\n    char *data = json_dumps (object, JSON_SORT_KEYS);\n    *len = strlen(data);\n\n    unsigned char sha1[20];\n    calculate_sha1 (sha1, data, *len);\n    rawdata_to_hex (sha1, file->file_id, 20);\n\n    json_decref (object);\n    return (guint8 
*)data;\n}\n\nstatic guint8 *\nseafile_to_data (Seafile *file, int *len)\n{\n    if (file->version > 0) {\n        guint8 *data;\n        int orig_len;\n        guint8 *compressed;\n\n        data = seafile_to_json (file, &orig_len);\n        if (!data)\n            return NULL;\n\n        if (seaf_compress (data, orig_len, &compressed, len) < 0) {\n            seaf_warning (\"Failed to compress file object %s.\\n\", file->file_id);\n            g_free (data);\n            return NULL;\n        }\n        g_free (data);\n        return compressed;\n    } else\n        return seafile_to_v0_data (file, len);\n}\n\nint\nseafile_save (SeafFSManager *fs_mgr,\n              const char *repo_id,\n              int version,\n              Seafile *file)\n{\n    guint8 *data;\n    int len;\n    int ret = 0;\n\n    if (seaf_obj_store_obj_exists (fs_mgr->obj_store, repo_id, version, file->file_id))\n        return 0;\n\n    data = seafile_to_data (file, &len);\n    if (!data)\n        return -1;\n\n    if (seaf_obj_store_write_obj (fs_mgr->obj_store, repo_id, version, file->file_id,\n                                  data, len, FALSE) < 0)\n        ret = -1;\n\n    g_free (data);\n    return ret;\n}\n\nstatic void compute_dir_id_v0 (SeafDir *dir, GList *entries)\n{\n    SHA_CTX ctx;\n    GList *p;\n    uint8_t sha1[20];\n    SeafDirent *dent;\n    guint32 mode_le;\n\n    /* ID for empty dirs is EMPTY_SHA1. */\n    if (entries == NULL) {\n        memset (dir->dir_id, '0', 40);\n        return;\n    }\n\n    SHA1_Init (&ctx);\n    for (p = entries; p; p = p->next) {\n        dent = (SeafDirent *)p->data;\n        SHA1_Update (&ctx, dent->id, 40);\n        SHA1_Update (&ctx, dent->name, dent->name_len);\n        /* Convert mode to little endian before compute. 
*/\n        if (G_BYTE_ORDER == G_BIG_ENDIAN)\n            mode_le = GUINT32_SWAP_LE_BE (dent->mode);\n        else\n            mode_le = dent->mode;\n        SHA1_Update (&ctx, &mode_le, sizeof(mode_le));\n    }\n    SHA1_Final (sha1, &ctx);\n\n    rawdata_to_hex (sha1, dir->dir_id, 20);\n}\n\nSeafDir *\nseaf_dir_new (const char *id, GList *entries, int version)\n{\n    SeafDir *dir;\n\n    dir = g_new0(SeafDir, 1);\n\n    dir->version = version;\n    if (id != NULL) {\n        memcpy(dir->dir_id, id, 40);\n        dir->dir_id[40] = '\\0';\n    } else if (version == 0) {\n        compute_dir_id_v0 (dir, entries);\n    }\n    dir->entries = entries;\n\n    if (dir->entries != NULL)\n        dir->ondisk = seaf_dir_to_data (dir, &dir->ondisk_size);\n    else\n        memcpy (dir->dir_id, EMPTY_SHA1, 40);\n\n    return dir;\n}\n\nvoid\nseaf_dir_free (SeafDir *dir)\n{\n    if (dir == NULL)\n        return;\n\n    GList *ptr = dir->entries;\n    while (ptr) {\n        seaf_dirent_free ((SeafDirent *)ptr->data);\n        ptr = ptr->next;\n    }\n\n    g_list_free (dir->entries);\n    g_free (dir->ondisk);\n    g_free(dir);\n}\n\nSeafDirent *\nseaf_dirent_new (int version, const char *sha1, int mode, const char *name,\n                 gint64 mtime, const char *modifier, gint64 size)\n{\n    SeafDirent *dent;\n\n    dent = g_new0 (SeafDirent, 1);\n    dent->version = version;\n    memcpy(dent->id, sha1, 40);\n    dent->id[40] = '\\0';\n    /* Mode for files must have 0644 set. 
To prevent the caller from forgetting,\n     * we set the bits here.\n     */\n    if (S_ISREG(mode))\n        dent->mode = (mode | 0644);\n    else\n        dent->mode = mode;\n    dent->name = g_strdup(name);\n    dent->name_len = strlen(name);\n\n    if (version > 0) {\n        dent->mtime = mtime;\n        if (S_ISREG(mode)) {\n            dent->modifier = g_strdup(modifier);\n            dent->size = size;\n        }\n    }\n\n    return dent;\n}\n\nvoid \nseaf_dirent_free (SeafDirent *dent)\n{\n    if (!dent)\n        return;\n    g_free (dent->name);\n    g_free (dent->modifier);\n    g_free (dent);\n}\n\nSeafDirent *\nseaf_dirent_dup (SeafDirent *dent)\n{\n    SeafDirent *new_dent;\n\n    new_dent = g_memdup (dent, sizeof(SeafDirent));\n    new_dent->name = g_strdup(dent->name);\n    new_dent->modifier = g_strdup(dent->modifier);\n\n    return new_dent;\n}\n\nstatic SeafDir *\nseaf_dir_from_v0_data (const char *dir_id, const uint8_t *data, int len)\n{\n    SeafDir *root;\n    SeafDirent *dent;\n    const uint8_t *ptr;\n    int remain;\n    int dirent_base_size;\n    guint32 meta_type;\n    guint32 name_len;\n\n    ptr = data;\n    remain = len;\n\n    meta_type = get32bit (&ptr);\n    remain -= 4;\n    if (meta_type != SEAF_METADATA_TYPE_DIR) {\n        seaf_warning (\"Data does not contain a directory.\\n\");\n        return NULL;\n    }\n\n    root = g_new0(SeafDir, 1);\n    root->object.type = SEAF_METADATA_TYPE_DIR;\n    root->version = 0;\n    memcpy(root->dir_id, dir_id, 40);\n    root->dir_id[40] = '\\0';\n\n    dirent_base_size = 2 * sizeof(guint32) + 40;\n    while (remain > dirent_base_size) {\n        dent = g_new0(SeafDirent, 1);\n\n        dent->version = 0;\n        dent->mode = get32bit (&ptr);\n        memcpy (dent->id, ptr, 40);\n        dent->id[40] = '\\0';\n        ptr += 40;\n        name_len = get32bit (&ptr);\n        remain -= dirent_base_size;\n        if (remain >= name_len) {\n            dent->name_len = MIN (name_len, 
SEAF_DIR_NAME_LEN - 1);\n            dent->name = g_strndup((const char *)ptr, dent->name_len);\n            ptr += dent->name_len;\n            remain -= dent->name_len;\n        } else {\n            seaf_warning (\"Bad data format for dir objcet %s.\\n\", dir_id);\n            g_free (dent);\n            goto bad;\n        }\n\n        root->entries = g_list_prepend (root->entries, dent);\n    }\n\n    root->entries = g_list_reverse (root->entries);\n\n    return root;\n\nbad:\n    seaf_dir_free (root);\n    return NULL;\n}\n\nstatic SeafDirent *\nparse_dirent (const char *dir_id, int version, json_t *object)\n{\n    guint32 mode;\n    const char *id;\n    const char *name;\n    gint64 mtime;\n    const char *modifier;\n    gint64 size;\n\n    mode = (guint32) json_object_get_int_member (object, \"mode\");\n\n    id = json_object_get_string_member (object, \"id\");\n    if (!id) {\n        seaf_debug (\"Dirent id not set for dir object %s.\\n\", dir_id);\n        return NULL;\n    }\n    if (!is_object_id_valid (id)) {\n        seaf_debug (\"Dirent id is invalid for dir object %s.\\n\", dir_id);\n        return NULL;\n    }\n\n    name = json_object_get_string_member (object, \"name\");\n    if (!name) {\n        seaf_debug (\"Dirent name not set for dir object %s.\\n\", dir_id);\n        return NULL;\n    }\n\n    mtime = json_object_get_int_member (object, \"mtime\");\n    if (S_ISREG(mode)) {\n        modifier = json_object_get_string_member (object, \"modifier\");\n        if (!modifier) {\n            seaf_debug (\"Dirent modifier not set for dir object %s.\\n\", dir_id);\n            return NULL;\n        }\n        size = json_object_get_int_member (object, \"size\");\n    }\n\n    SeafDirent *dirent = g_new0 (SeafDirent, 1);\n    dirent->version = version;\n    dirent->mode = mode;\n    memcpy (dirent->id, id, 40);\n    dirent->name_len = strlen(name);\n    dirent->name = g_strdup(name);\n    dirent->mtime = mtime;\n    if (S_ISREG(mode)) {\n        
dirent->modifier = g_strdup(modifier);\n        dirent->size = size;\n    }\n\n    return dirent;\n}\n\nstatic SeafDir *\nseaf_dir_from_json_object (const char *dir_id, json_t *object)\n{\n    json_t *dirent_array = NULL;\n    int type;\n    int version;\n    SeafDir *dir = NULL;\n\n    /* Sanity checks. */\n    type = json_object_get_int_member (object, \"type\");\n    if (type != SEAF_METADATA_TYPE_DIR) {\n        seaf_debug (\"Object %s is not a dir.\\n\", dir_id);\n        return NULL;\n    }\n\n    version = (int) json_object_get_int_member (object, \"version\");\n    if (version < 1) {\n        seaf_debug (\"Dir object %s version should be > 0, version is %d.\\n\",\n                    dir_id, version);\n        return NULL;\n    }\n\n    dirent_array = json_object_get (object, \"dirents\");\n    if (!dirent_array) {\n        seaf_debug (\"No dirents in dir object %s.\\n\", dir_id);\n        return NULL;\n    }\n\n    dir = g_new0 (SeafDir, 1);\n\n    dir->object.type = SEAF_METADATA_TYPE_DIR;\n\n    memcpy (dir->dir_id, dir_id, 40);\n    dir->version = version;\n\n    size_t n_dirents = json_array_size (dirent_array);\n    int i;\n    json_t *dirent_obj;\n    SeafDirent *dirent;\n    for (i = 0; i < n_dirents; ++i) {\n        dirent_obj = json_array_get (dirent_array, i);\n        dirent = parse_dirent (dir_id, version, dirent_obj);\n        if (!dirent) {\n            seaf_dir_free (dir);\n            return NULL;\n        }\n        dir->entries = g_list_prepend (dir->entries, dirent);\n    }\n    dir->entries = g_list_reverse (dir->entries);\n\n    return dir;\n}\n\nstatic SeafDir *\nseaf_dir_from_json (const char *dir_id, uint8_t *data, int len)\n{\n    guint8 *decompressed;\n    int outlen;\n    json_t *object = NULL;\n    json_error_t error;\n    SeafDir *dir;\n\n    if (seaf_decompress (data, len, &decompressed, &outlen) < 0) {\n        seaf_warning (\"Failed to decompress dir object %s.\\n\", dir_id);\n        return NULL;\n    }\n\n    object = 
json_loadb ((const char *)decompressed, outlen, 0, &error);\n    g_free (decompressed);\n    if (!object) {\n        if (error.text)\n            seaf_warning (\"Failed to load seafdir json object: %s.\\n\", error.text);\n        else\n            seaf_warning (\"Failed to load seafdir json object.\\n\");\n        return NULL;\n    }\n\n    dir = seaf_dir_from_json_object (dir_id, object);\n\n    json_decref (object);\n    return dir;\n}\n\nSeafDir *\nseaf_dir_from_data (const char *dir_id, uint8_t *data, int len,\n                    gboolean is_json)\n{\n    if (is_json)\n        return seaf_dir_from_json (dir_id, data, len);\n    else\n        return seaf_dir_from_v0_data (dir_id, data, len);\n}\n\ninline static int\nondisk_dirent_size (SeafDirent *dirent)\n{\n    return sizeof(DirentOndisk) + dirent->name_len;\n}\n\nstatic void *\nseaf_dir_to_v0_data (SeafDir *dir, int *len)\n{\n    SeafdirOndisk *ondisk;\n    int dir_ondisk_size = sizeof(SeafdirOndisk);\n    GList *dirents = dir->entries;\n    GList *ptr;\n    SeafDirent *de;\n    char *p;\n    DirentOndisk *de_ondisk;\n\n    for (ptr = dirents; ptr; ptr = ptr->next) {\n        de = ptr->data;\n        dir_ondisk_size += ondisk_dirent_size (de);\n    }\n\n    *len = dir_ondisk_size;\n    ondisk = (SeafdirOndisk *) g_new0 (char, dir_ondisk_size);\n\n    ondisk->type = htonl (SEAF_METADATA_TYPE_DIR);\n    p = ondisk->dirents;\n    for (ptr = dirents; ptr; ptr = ptr->next) {\n        de = ptr->data;\n        de_ondisk = (DirentOndisk *) p;\n\n        de_ondisk->mode = htonl(de->mode);\n        memcpy (de_ondisk->id, de->id, 40);\n        de_ondisk->name_len = htonl (de->name_len);\n        memcpy (de_ondisk->name, de->name, de->name_len);\n\n        p += ondisk_dirent_size (de);\n    }\n\n    return (void *)ondisk;\n}\n\nstatic void\nadd_to_dirent_array (json_t *array, SeafDirent *dirent)\n{\n    json_t *object;\n\n    object = json_object ();\n    json_object_set_int_member (object, \"mode\", dirent->mode);\n    
json_object_set_string_member (object, \"id\", dirent->id);\n    json_object_set_string_member (object, \"name\", dirent->name);\n    json_object_set_int_member (object, \"mtime\", dirent->mtime);\n    if (S_ISREG(dirent->mode)) {\n        json_object_set_string_member (object, \"modifier\", dirent->modifier);\n        json_object_set_int_member (object, \"size\", dirent->size);\n    }\n\n    json_array_append_new (array, object);\n}\n\nstatic void *\nseaf_dir_to_json (SeafDir *dir, int *len)\n{\n    json_t *object, *dirent_array;\n    GList *ptr;\n    SeafDirent *dirent;\n\n    object = json_object ();\n\n    json_object_set_int_member (object, \"type\", SEAF_METADATA_TYPE_DIR);\n    json_object_set_int_member (object, \"version\", dir->version);\n\n    dirent_array = json_array ();\n    for (ptr = dir->entries; ptr; ptr = ptr->next) {\n        dirent = ptr->data;\n        add_to_dirent_array (dirent_array, dirent);\n    }\n    json_object_set_new (object, \"dirents\", dirent_array);\n\n    char *data = json_dumps (object, JSON_SORT_KEYS);\n    *len = strlen(data);\n\n    /* The dir object id is sha1 hash of the json object. 
*/\n    unsigned char sha1[20];\n    calculate_sha1 (sha1, data, *len);\n    rawdata_to_hex (sha1, dir->dir_id, 20);\n\n    json_decref (object);\n    return data;\n}\n\nvoid *\nseaf_dir_to_data (SeafDir *dir, int *len)\n{\n    if (dir->version > 0) {\n        guint8 *data;\n        int orig_len;\n        guint8 *compressed;\n\n        data = seaf_dir_to_json (dir, &orig_len);\n        if (!data)\n            return NULL;\n\n        if (seaf_compress (data, orig_len, &compressed, len) < 0) {\n            seaf_warning (\"Failed to compress dir object %s.\\n\", dir->dir_id);\n            g_free (data);\n            return NULL;\n        }\n\n        g_free (data);\n        return compressed;\n    } else\n        return seaf_dir_to_v0_data (dir, len);\n}\n\nint\nseaf_dir_save (SeafFSManager *fs_mgr,\n               const char *repo_id,\n               int version,\n               SeafDir *dir)\n{\n    int ret = 0;\n\n    /* Don't need to save empty dir on disk. */\n    if (memcmp (dir->dir_id, EMPTY_SHA1, 40) == 0)\n        return 0;\n\n    if (seaf_obj_store_obj_exists (fs_mgr->obj_store, repo_id, version, dir->dir_id))\n        return 0;\n\n    if (seaf_obj_store_write_obj (fs_mgr->obj_store, repo_id, version, dir->dir_id,\n                                  dir->ondisk, dir->ondisk_size, FALSE) < 0)\n        ret = -1;\n\n    return ret;\n}\n\nSeafDir *\nseaf_fs_manager_get_seafdir (SeafFSManager *mgr,\n                             const char *repo_id,\n                             int version,\n                             const char *dir_id)\n{\n    void *data;\n    int len;\n    SeafDir *dir;\n\n    /* TODO: add hash cache */\n\n    if (memcmp (dir_id, EMPTY_SHA1, 40) == 0) {\n        dir = g_new0 (SeafDir, 1);\n        dir->version = version;\n        memset (dir->dir_id, '0', 40);\n        return dir;\n    }\n\n    if (seaf_obj_store_read_obj (mgr->obj_store, repo_id, version,\n                                 dir_id, &data, &len) < 0) {\n        seaf_warning 
(\"[fs mgr] Failed to read dir %s.\\n\", dir_id);\n        return NULL;\n    }\n\n    dir = seaf_dir_from_data (dir_id, data, len, (version > 0));\n    g_free (data);\n\n    return dir;\n}\n\nstatic gint\ncompare_dirents (gconstpointer a, gconstpointer b)\n{\n    const SeafDirent *denta = a, *dentb = b;\n\n    return strcmp (dentb->name, denta->name);\n}\n\nstatic gboolean\nis_dirents_sorted (GList *dirents)\n{\n    GList *ptr;\n    SeafDirent *dent, *dent_n;\n    gboolean ret = TRUE;\n\n    for (ptr = dirents; ptr != NULL; ptr = ptr->next) {\n        dent = ptr->data;\n        if (!ptr->next)\n            break;\n        dent_n = ptr->next->data;\n\n        /* If dirents are not sorted in descending order, return FALSE. */\n        if (strcmp (dent->name, dent_n->name) < 0) {\n            ret = FALSE;\n            break;\n        }\n    }\n\n    return ret;\n}\n\nSeafDir *\nseaf_fs_manager_get_seafdir_sorted (SeafFSManager *mgr,\n                                    const char *repo_id,\n                                    int version,\n                                    const char *dir_id)\n{\n    SeafDir *dir = seaf_fs_manager_get_seafdir(mgr, repo_id, version, dir_id);\n\n    if (!dir)\n        return NULL;\n\n    /* Only some very old dir objects are not sorted. 
*/\n    if (version > 0)\n        return dir;\n\n    if (!is_dirents_sorted (dir->entries))\n        dir->entries = g_list_sort (dir->entries, compare_dirents);\n\n    return dir;\n}\n\nSeafDir *\nseaf_fs_manager_get_seafdir_sorted_by_path (SeafFSManager *mgr,\n                                            const char *repo_id,\n                                            int version,\n                                            const char *root_id,\n                                            const char *path)\n{\n    SeafDir *dir = seaf_fs_manager_get_seafdir_by_path (mgr, repo_id,\n                                                        version, root_id,\n                                                        path, NULL);\n\n    if (!dir)\n        return NULL;\n\n    /* Only some very old dir objects are not sorted. */\n    if (version > 0)\n        return dir;\n\n    if (!is_dirents_sorted (dir->entries))\n        dir->entries = g_list_sort (dir->entries, compare_dirents);\n\n    return dir;\n}\n\nstatic int\nparse_metadata_type_v0 (const uint8_t *data, int len)\n{\n    const uint8_t *ptr = data;\n\n    if (len < sizeof(guint32))\n        return SEAF_METADATA_TYPE_INVALID;\n\n    return (int)(get32bit(&ptr));\n}\n\nstatic int\nparse_metadata_type_json (const char *obj_id, uint8_t *data, int len)\n{\n    guint8 *decompressed;\n    int outlen;\n    json_t *object;\n    json_error_t error;\n    int type;\n\n    if (seaf_decompress (data, len, &decompressed, &outlen) < 0) {\n        seaf_warning (\"Failed to decompress fs object %s.\\n\", obj_id);\n        return SEAF_METADATA_TYPE_INVALID;\n    }\n\n    object = json_loadb ((const char *)decompressed, outlen, 0, &error);\n    g_free (decompressed);\n    if (!object) {\n        if (error.text)\n            seaf_warning (\"Failed to load fs json object: %s.\\n\", error.text);\n        else\n            seaf_warning (\"Failed to load fs json object.\\n\");\n        return SEAF_METADATA_TYPE_INVALID;\n    }\n\n    type 
= json_object_get_int_member (object, \"type\");\n\n    json_decref (object);\n    return type;\n}\n\nint\nseaf_metadata_type_from_data (const char *obj_id,\n                              uint8_t *data, int len, gboolean is_json)\n{\n    if (is_json)\n        return parse_metadata_type_json (obj_id, data, len);\n    else\n        return parse_metadata_type_v0 (data, len);\n}\n\nSeafFSObject *\nfs_object_from_v0_data (const char *obj_id, const uint8_t *data, int len)\n{\n    int type = parse_metadata_type_v0 (data, len);\n\n    if (type == SEAF_METADATA_TYPE_FILE)\n        return (SeafFSObject *)seafile_from_v0_data (obj_id, data, len);\n    else if (type == SEAF_METADATA_TYPE_DIR)\n        return (SeafFSObject *)seaf_dir_from_v0_data (obj_id, data, len);\n    else {\n        seaf_warning (\"Invalid object type %d.\\n\", type);\n        return NULL;\n    }\n}\n\nSeafFSObject *\nfs_object_from_json (const char *obj_id, uint8_t *data, int len)\n{\n    guint8 *decompressed;\n    int outlen;\n    json_t *object;\n    json_error_t error;\n    int type;\n    SeafFSObject *fs_obj;\n\n    if (seaf_decompress (data, len, &decompressed, &outlen) < 0) {\n        seaf_warning (\"Failed to decompress fs object %s.\\n\", obj_id);\n        return NULL;\n    }\n\n    object = json_loadb ((const char *)decompressed, outlen, 0, &error);\n    g_free (decompressed);\n    if (!object) {\n        if (error.text)\n            seaf_warning (\"Failed to load fs json object: %s.\\n\", error.text);\n        else\n            seaf_warning (\"Failed to load fs json object.\\n\");\n        return NULL;\n    }\n\n    type = json_object_get_int_member (object, \"type\");\n\n    if (type == SEAF_METADATA_TYPE_FILE)\n        fs_obj = (SeafFSObject *)seafile_from_json_object (obj_id, object);\n    else if (type == SEAF_METADATA_TYPE_DIR)\n        fs_obj = (SeafFSObject *)seaf_dir_from_json_object (obj_id, object);\n    else {\n        seaf_warning (\"Invalid fs type %d.\\n\", type);\n        
json_decref (object);\n        return NULL;\n    }\n\n    json_decref (object);\n\n    return fs_obj;\n}\n\nSeafFSObject *\nseaf_fs_object_from_data (const char *obj_id,\n                          uint8_t *data, int len,\n                          gboolean is_json)\n{\n    if (is_json)\n        return fs_object_from_json (obj_id, data, len);\n    else\n        return fs_object_from_v0_data (obj_id, data, len);\n}\n\nvoid\nseaf_fs_object_free (SeafFSObject *obj)\n{\n    if (!obj)\n        return;\n\n    if (obj->type == SEAF_METADATA_TYPE_FILE)\n        seafile_unref ((Seafile *)obj);\n    else if (obj->type == SEAF_METADATA_TYPE_DIR)\n        seaf_dir_free ((SeafDir *)obj);\n}\n\nBlockList *\nblock_list_new ()\n{\n    BlockList *bl = g_new0 (BlockList, 1);\n\n    bl->block_hash = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL);\n    bl->block_ids = g_ptr_array_new_with_free_func (g_free);\n\n    return bl;\n}\n\nvoid\nblock_list_free (BlockList *bl)\n{\n    if (bl->block_hash)\n        g_hash_table_destroy (bl->block_hash);\n    g_ptr_array_free (bl->block_ids, TRUE);\n    g_free (bl);\n}\n\nvoid\nblock_list_insert (BlockList *bl, const char *block_id)\n{\n    if (g_hash_table_lookup (bl->block_hash, block_id))\n        return;\n\n    char *key = g_strdup(block_id);\n    g_hash_table_replace (bl->block_hash, key, key);\n    g_ptr_array_add (bl->block_ids, g_strdup(block_id));\n    ++bl->n_blocks;\n}\n\nBlockList *\nblock_list_difference (BlockList *bl1, BlockList *bl2)\n{\n    BlockList *bl;\n    int i;\n    char *block_id;\n    char *key;\n\n    bl = block_list_new ();\n\n    for (i = 0; i < bl1->block_ids->len; ++i) {\n        block_id = g_ptr_array_index (bl1->block_ids, i);\n        if (g_hash_table_lookup (bl2->block_hash, block_id) == NULL) {\n            key = g_strdup(block_id);\n            g_hash_table_replace (bl->block_hash, key, key);\n            g_ptr_array_add (bl->block_ids, g_strdup(block_id));\n            ++bl->n_blocks;\n        
}\n    }\n\n    return bl;\n}\n\nstatic int\ntraverse_file (SeafFSManager *mgr,\n               const char *repo_id,\n               int version,\n               const char *id,\n               TraverseFSTreeCallback callback,\n               void *user_data,\n               gboolean skip_errors)\n{\n    gboolean stop = FALSE;\n\n    if (memcmp (id, EMPTY_SHA1, 40) == 0)\n        return 0;\n\n    if (!callback (mgr, repo_id, version, id, SEAF_METADATA_TYPE_FILE, user_data, &stop) &&\n        !skip_errors)\n        return -1;\n\n    return 0;\n}\n\nstatic int\ntraverse_dir (SeafFSManager *mgr,\n              const char *repo_id,\n              int version,\n              const char *id,\n              TraverseFSTreeCallback callback,\n              void *user_data,\n              gboolean skip_errors)\n{\n    SeafDir *dir;\n    GList *p;\n    SeafDirent *seaf_dent;\n    gboolean stop = FALSE;\n\n    if (!callback (mgr, repo_id, version,\n                   id, SEAF_METADATA_TYPE_DIR, user_data, &stop) &&\n        !skip_errors)\n        return -1;\n\n    if (stop)\n        return 0;\n\n    dir = seaf_fs_manager_get_seafdir (mgr, repo_id, version, id);\n    if (!dir) {\n        seaf_warning (\"[fs-mgr]get seafdir %s failed\\n\", id);\n        if (skip_errors)\n            return 0;\n        return -1;\n    }\n    for (p = dir->entries; p; p = p->next) {\n        seaf_dent = (SeafDirent *)p->data;\n\n        if (S_ISREG(seaf_dent->mode)) {\n            if (traverse_file (mgr, repo_id, version, seaf_dent->id,\n                               callback, user_data, skip_errors) < 0) {\n                if (!skip_errors) {\n                    seaf_dir_free (dir);\n                    return -1;\n                }\n            }\n        } else if (S_ISDIR(seaf_dent->mode)) {\n            if (traverse_dir (mgr, repo_id, version, seaf_dent->id,\n                              callback, user_data, skip_errors) < 0) {\n                if (!skip_errors) {\n                    
seaf_dir_free (dir);\n                    return -1;\n                }\n            }\n        }\n    }\n\n    seaf_dir_free (dir);\n    return 0;\n}\n\nint\nseaf_fs_manager_traverse_tree (SeafFSManager *mgr,\n                               const char *repo_id,\n                               int version,\n                               const char *root_id,\n                               TraverseFSTreeCallback callback,\n                               void *user_data,\n                               gboolean skip_errors)\n{\n    if (strcmp (root_id, EMPTY_SHA1) == 0) {\n        return 0;\n    }\n    return traverse_dir (mgr, repo_id, version, root_id, callback, user_data, skip_errors);\n}\n\nstatic int\ntraverse_dir_path (SeafFSManager *mgr,\n                   const char *repo_id,\n                   int version,\n                   const char *dir_path,\n                   SeafDirent *dent,\n                   TraverseFSPathCallback callback,\n                   void *user_data)\n{\n    SeafDir *dir;\n    GList *p;\n    SeafDirent *seaf_dent;\n    gboolean stop = FALSE;\n    char *sub_path;\n    int ret = 0;\n\n    if (!callback (mgr, dir_path, dent, user_data, &stop))\n        return -1;\n\n    if (stop)\n        return 0;\n\n    dir = seaf_fs_manager_get_seafdir (mgr, repo_id, version, dent->id);\n    if (!dir) {\n        seaf_warning (\"get seafdir %s:%s failed\\n\", repo_id, dent->id);\n        return -1;\n    }\n\n    for (p = dir->entries; p; p = p->next) {\n        seaf_dent = (SeafDirent *)p->data;\n        sub_path = g_strconcat (dir_path, \"/\", seaf_dent->name, NULL);\n\n        if (S_ISREG(seaf_dent->mode)) {\n            if (!callback (mgr, sub_path, seaf_dent, user_data, &stop)) {\n                g_free (sub_path);\n                ret = -1;\n                break;\n            }\n        } else if (S_ISDIR(seaf_dent->mode)) {\n            if (traverse_dir_path (mgr, repo_id, version, sub_path, seaf_dent,\n                                   
callback, user_data) < 0) {\n                g_free (sub_path);\n                ret = -1;\n                break;\n            }\n        }\n        g_free (sub_path);\n    }\n\n    seaf_dir_free (dir);\n    return ret;\n}\n\nint\nseaf_fs_manager_traverse_path (SeafFSManager *mgr,\n                               const char *repo_id,\n                               int version,\n                               const char *root_id,\n                               const char *dir_path,\n                               TraverseFSPathCallback callback,\n                               void *user_data)\n{\n    SeafDirent *dent;\n    int ret = 0;\n\n    dent = seaf_fs_manager_get_dirent_by_path (mgr, repo_id, version,\n                                               root_id, dir_path, NULL);\n    if (!dent) {\n        seaf_warning (\"Failed to get dirent for %.8s:%s.\\n\", repo_id, dir_path);\n        return -1;\n    }\n\n    ret = traverse_dir_path (mgr, repo_id, version, dir_path, dent,\n                             callback, user_data);\n\n    seaf_dirent_free (dent);\n    return ret;\n}\n\nstatic gboolean\nfill_blocklist (SeafFSManager *mgr,\n                const char *repo_id, int version,\n                const char *obj_id, int type,\n                void *user_data, gboolean *stop)\n{\n    BlockList *bl = user_data;\n    Seafile *seafile;\n    int i;\n\n    if (type == SEAF_METADATA_TYPE_FILE) {\n        seafile = seaf_fs_manager_get_seafile (mgr, repo_id, version, obj_id);\n        if (!seafile) {\n            seaf_warning (\"[fs mgr] Failed to find file %s.\\n\", obj_id);\n            return FALSE;\n        }\n\n        for (i = 0; i < seafile->n_blocks; ++i)\n            block_list_insert (bl, seafile->blk_sha1s[i]);\n\n        seafile_unref (seafile);\n    }\n\n    return TRUE;\n}\n\nint\nseaf_fs_manager_populate_blocklist (SeafFSManager *mgr,\n                                    const char *repo_id,\n                                    int version,\n            
                        const char *root_id,\n                                    BlockList *bl)\n{\n    return seaf_fs_manager_traverse_tree (mgr, repo_id, version, root_id,\n                                          fill_blocklist,\n                                          bl, FALSE);\n}\n\ngboolean\nseaf_fs_manager_object_exists (SeafFSManager *mgr,\n                               const char *repo_id,\n                               int version,\n                               const char *id)\n{\n    /* Empty file and dir always exists. */\n    if (memcmp (id, EMPTY_SHA1, 40) == 0)\n        return TRUE;\n\n    return seaf_obj_store_obj_exists (mgr->obj_store, repo_id, version, id);\n}\n\nvoid\nseaf_fs_manager_delete_object (SeafFSManager *mgr,\n                               const char *repo_id,\n                               int version,\n                               const char *id)\n{\n    seaf_obj_store_delete_obj (mgr->obj_store, repo_id, version, id);\n}\n\ngint64\nseaf_fs_manager_get_file_size (SeafFSManager *mgr,\n                               const char *repo_id,\n                               int version,\n                               const char *file_id)\n{\n    Seafile *file;\n    gint64 file_size;\n\n    file = seaf_fs_manager_get_seafile (seaf->fs_mgr, repo_id, version, file_id);\n    if (!file) {\n        seaf_warning (\"Couldn't get file %s:%s\\n\", repo_id, file_id);\n        return -1;\n    }\n\n    file_size = file->file_size;\n\n    seafile_unref (file);\n    return file_size;\n}\n\nstatic gint64\nget_dir_size (SeafFSManager *mgr, const char *repo_id, int version, const char *id)\n{\n    SeafDir *dir;\n    SeafDirent *seaf_dent;\n    guint64 size = 0;\n    gint64 result;\n    GList *p;\n\n    dir = seaf_fs_manager_get_seafdir (mgr, repo_id, version, id);\n    if (!dir)\n        return -1;\n\n    for (p = dir->entries; p; p = p->next) {\n        seaf_dent = (SeafDirent *)p->data;\n\n        if (S_ISREG(seaf_dent->mode)) {\n            
if (dir->version > 0)\n                result = seaf_dent->size;\n            else {\n                result = seaf_fs_manager_get_file_size (mgr,\n                                                        repo_id,\n                                                        version,\n                                                        seaf_dent->id);\n                if (result < 0) {\n                    seaf_dir_free (dir);\n                    return result;\n                }\n            }\n            size += result;\n        } else if (S_ISDIR(seaf_dent->mode)) {\n            result = get_dir_size (mgr, repo_id, version, seaf_dent->id);\n            if (result < 0) {\n                seaf_dir_free (dir);\n                return result;\n            }\n            size += result;\n        }\n    }\n\n    seaf_dir_free (dir);\n    return size;\n}\n\ngint64\nseaf_fs_manager_get_fs_size (SeafFSManager *mgr,\n                             const char *repo_id,\n                             int version,\n                             const char *root_id)\n{\n     if (strcmp (root_id, EMPTY_SHA1) == 0)\n        return 0;\n     return get_dir_size (mgr, repo_id, version, root_id);\n}\n\nstatic int\ncount_dir_files (SeafFSManager *mgr, const char *repo_id, int version, const char *id)\n{\n    SeafDir *dir;\n    SeafDirent *seaf_dent;\n    int count = 0;\n    int result;\n    GList *p;\n\n    dir = seaf_fs_manager_get_seafdir (mgr, repo_id, version, id);\n    if (!dir)\n        return -1;\n\n    for (p = dir->entries; p; p = p->next) {\n        seaf_dent = (SeafDirent *)p->data;\n\n        if (S_ISREG(seaf_dent->mode)) {\n            count ++;\n        } else if (S_ISDIR(seaf_dent->mode)) {\n            result = count_dir_files (mgr, repo_id, version, seaf_dent->id);\n            if (result < 0) {\n                seaf_dir_free (dir);\n                return result;\n            }\n            count += result;\n        }\n    }\n\n    seaf_dir_free (dir);\n    return 
count;\n}\n\nstatic int\nget_file_count_info (SeafFSManager *mgr,\n                     const char *repo_id,\n                     int version,\n                     const char *id,\n                     gint64 *dir_count,\n                     gint64 *file_count,\n                     gint64 *size)\n{\n    SeafDir *dir;\n    SeafDirent *seaf_dent;\n    GList *p;\n    int ret = 0;\n\n    dir = seaf_fs_manager_get_seafdir (mgr, repo_id, version, id);\n    if (!dir)\n        return -1;\n\n    for (p = dir->entries; p; p = p->next) {\n        seaf_dent = (SeafDirent *)p->data;\n\n        if (S_ISREG(seaf_dent->mode)) {\n            (*file_count)++;\n            if (version > 0)\n                (*size) += seaf_dent->size;\n        } else if (S_ISDIR(seaf_dent->mode)) {\n            (*dir_count)++;\n            ret = get_file_count_info (mgr, repo_id, version, seaf_dent->id,\n                                       dir_count, file_count, size);\n        }\n    }\n    seaf_dir_free (dir);\n\n    return ret;\n}\n\nint\nseaf_fs_manager_count_fs_files (SeafFSManager *mgr,\n                                const char *repo_id,\n                                int version,\n                                const char *root_id)\n{\n     if (strcmp (root_id, EMPTY_SHA1) == 0)\n        return 0;\n     return count_dir_files (mgr, repo_id, version, root_id);\n}\n\nSeafDir *\nseaf_fs_manager_get_seafdir_by_path (SeafFSManager *mgr,\n                                     const char *repo_id,\n                                     int version,\n                                     const char *root_id,\n                                     const char *path,\n                                     GError **error)\n{\n    SeafDir *dir;\n    SeafDirent *dent;\n    const char *dir_id = root_id;\n    char *name, *saveptr;\n    char *tmp_path = g_strdup(path);\n\n    dir = seaf_fs_manager_get_seafdir (mgr, repo_id, version, dir_id);\n    if (!dir) {\n        g_set_error (error, SEAFILE_DOMAIN, 
SEAF_ERR_DIR_MISSING, \"directory is missing\");\n        g_free (tmp_path);\n        return NULL;\n    }\n\n    name = strtok_r (tmp_path, \"/\", &saveptr);\n    while (name != NULL) {\n        GList *l;\n        for (l = dir->entries; l != NULL; l = l->next) {\n            dent = l->data;\n\n            if (strcmp(dent->name, name) == 0 && S_ISDIR(dent->mode)) {\n                dir_id = dent->id;\n                break;\n            }\n        }\n\n        if (!l) {\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_PATH_NO_EXIST,\n                         \"Path does not exist %s\", path);\n            seaf_dir_free (dir);\n            dir = NULL;\n            break;\n        }\n\n        SeafDir *prev = dir;\n        dir = seaf_fs_manager_get_seafdir (mgr, repo_id, version, dir_id);\n        seaf_dir_free (prev);\n\n        if (!dir) {\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_DIR_MISSING,\n                         \"directory is missing\");\n            break;\n        }\n\n        name = strtok_r (NULL, \"/\", &saveptr);\n    }\n\n    g_free (tmp_path);\n    return dir;\n}\n\nchar *\nseaf_fs_manager_path_to_obj_id (SeafFSManager *mgr,\n                                const char *repo_id,\n                                int version,\n                                const char *root_id,\n                                const char *path,\n                                guint32 *mode,\n                                GError **error)\n{\n    char *copy = g_strdup (path);\n    int off = strlen(copy) - 1;\n    char *slash, *name;\n    SeafDir *base_dir = NULL;\n    SeafDirent *dent;\n    GList *p;\n    char *obj_id = NULL;\n\n    while (off >= 0 && copy[off] == '/')\n        copy[off--] = 0;\n\n    if (strlen(copy) == 0) {\n        /* the path is root \"/\" */\n        if (mode) {\n            *mode = S_IFDIR;\n        }\n        obj_id = g_strdup(root_id);\n        goto out;\n    }\n\n    slash = strrchr (copy, '/');\n    if (!slash) {\n  
      base_dir = seaf_fs_manager_get_seafdir (mgr, repo_id, version, root_id);\n        if (!base_dir) {\n            seaf_warning (\"Failed to find root dir %s.\\n\", root_id);\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, \" \");\n            goto out;\n        }\n        name = copy;\n    } else {\n        *slash = 0;\n        name = slash + 1;\n        GError *tmp_error = NULL;\n        base_dir = seaf_fs_manager_get_seafdir_by_path (mgr,\n                                                        repo_id,\n                                                        version,\n                                                        root_id,\n                                                        copy,\n                                                        &tmp_error);\n        if (tmp_error &&\n            !g_error_matches(tmp_error,\n                             SEAFILE_DOMAIN,\n                             SEAF_ERR_PATH_NO_EXIST)) {\n            seaf_warning (\"Failed to get dir for %s.\\n\", copy);\n            g_propagate_error (error, tmp_error);\n            goto out;\n        }\n\n        /* The path doesn't exist in this commit. 
*/\n        if (!base_dir) {\n            g_propagate_error (error, tmp_error);\n            goto out;\n        }\n    }\n\n    for (p = base_dir->entries; p != NULL; p = p->next) {\n        dent = p->data;\n\n        if (!is_object_id_valid (dent->id))\n            continue;\n\n        if (strcmp (dent->name, name) == 0) {\n            obj_id = g_strdup (dent->id);\n            if (mode) {\n                *mode = dent->mode;\n            }\n            break;\n        }\n    }\n\nout:\n    if (base_dir)\n        seaf_dir_free (base_dir);\n    g_free (copy);\n    return obj_id;\n}\n\nchar *\nseaf_fs_manager_get_seafile_id_by_path (SeafFSManager *mgr,\n                                        const char *repo_id,\n                                        int version,\n                                        const char *root_id,\n                                        const char *path,\n                                        GError **error)\n{\n    guint32 mode;\n    char *file_id;\n\n    file_id = seaf_fs_manager_path_to_obj_id (mgr, repo_id, version,\n                                              root_id, path, &mode, error);\n\n    if (!file_id)\n        return NULL;\n\n    if (file_id && S_ISDIR(mode)) {\n        g_free (file_id);\n        return NULL;\n    }\n\n    return file_id;\n}\n\nchar *\nseaf_fs_manager_get_seafdir_id_by_path (SeafFSManager *mgr,\n                                        const char *repo_id,\n                                        int version,\n                                        const char *root_id,\n                                        const char *path,\n                                        GError **error)\n{\n    guint32 mode = 0;\n    char *dir_id;\n\n    dir_id = seaf_fs_manager_path_to_obj_id (mgr, repo_id, version,\n                                             root_id, path, &mode, error);\n\n    if (!dir_id)\n        return NULL;\n\n    if (dir_id && !S_ISDIR(mode)) {\n        g_free (dir_id);\n        return NULL;\n    
}\n\n    return dir_id;\n}\n\nSeafDirent *\nseaf_fs_manager_get_dirent_by_path (SeafFSManager *mgr,\n                                    const char *repo_id,\n                                    int version,\n                                    const char *root_id,\n                                    const char *path,\n                                    GError **error)\n{\n    SeafDirent *dent = NULL;\n    SeafDir *dir = NULL;\n    char *parent_dir = NULL;\n    char *file_name = NULL;\n\n    parent_dir  = g_path_get_dirname(path);\n    file_name = g_path_get_basename(path);\n\n    if (strcmp (parent_dir, \".\") == 0) {\n        dir = seaf_fs_manager_get_seafdir (mgr, repo_id, version, root_id);\n        if (!dir) {\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_DIR_MISSING, \"directory is missing\");\n        }\n    } else\n        dir = seaf_fs_manager_get_seafdir_by_path (mgr, repo_id, version,\n                                                   root_id, parent_dir, error);\n\n    if (!dir) {\n        goto out;\n    }\n\n    GList *p;\n    for (p = dir->entries; p; p = p->next) {\n        SeafDirent *d = p->data;\n        if (strcmp (d->name, file_name) == 0) {\n            dent = seaf_dirent_dup(d);\n            break;\n        }\n    }\n\nout:\n    if (dir)\n        seaf_dir_free (dir);\n    g_free (parent_dir);\n    g_free (file_name);\n\n    return dent;\n}\n\nstatic gboolean\nverify_seafdir_v0 (const char *dir_id, const uint8_t *data, int len,\n                   gboolean verify_id)\n{\n    guint32 meta_type;\n    guint32 mode;\n    char id[41];\n    guint32 name_len;\n    char name[SEAF_DIR_NAME_LEN];\n    const uint8_t *ptr;\n    int remain;\n    int dirent_base_size;\n    SHA_CTX ctx;\n    uint8_t sha1[20];\n    char check_id[41];\n\n    if (len < sizeof(SeafdirOndisk)) {\n        seaf_warning (\"[fs mgr] Corrupt seafdir object %s.\\n\", dir_id);\n        return FALSE;\n    }\n\n    ptr = data;\n    remain = len;\n\n    meta_type = get32bit 
(&ptr);\n    remain -= 4;\n    if (meta_type != SEAF_METADATA_TYPE_DIR) {\n        seaf_warning (\"Data does not contain a directory.\\n\");\n        return FALSE;\n    }\n\n    if (verify_id)\n        SHA1_Init (&ctx);\n\n    dirent_base_size = 2 * sizeof(guint32) + 40;\n    while (remain > dirent_base_size) {\n        mode = get32bit (&ptr);\n        memcpy (id, ptr, 40);\n        id[40] = '\\0';\n        ptr += 40;\n        name_len = get32bit (&ptr);\n        remain -= dirent_base_size;\n        if (remain >= name_len) {\n            name_len = MIN (name_len, SEAF_DIR_NAME_LEN - 1);\n            memcpy (name, ptr, name_len);\n            ptr += name_len;\n            remain -= name_len;\n        } else {\n            seaf_warning (\"Bad data format for dir object %s.\\n\", dir_id);\n            return FALSE;\n        }\n\n        if (verify_id) {\n            /* Convert mode to little endian before compute. */\n            if (G_BYTE_ORDER == G_BIG_ENDIAN)\n                mode = GUINT32_SWAP_LE_BE (mode);\n\n            SHA1_Update (&ctx, id, 40);\n            SHA1_Update (&ctx, name, name_len);\n            SHA1_Update (&ctx, &mode, sizeof(mode));\n        }\n    }\n\n    if (!verify_id)\n        return TRUE;\n\n    SHA1_Final (sha1, &ctx);\n    rawdata_to_hex (sha1, check_id, 20);\n\n    if (strcmp (check_id, dir_id) == 0)\n        return TRUE;\n    else\n        return FALSE;\n}\n\nstatic gboolean\nverify_fs_object_json (const char *obj_id, uint8_t *data, int len)\n{\n    guint8 *decompressed;\n    int outlen;\n    unsigned char sha1[20];\n    char hex[41];\n\n    if (seaf_decompress (data, len, &decompressed, &outlen) < 0) {\n        seaf_warning (\"Failed to decompress fs object %s.\\n\", obj_id);\n        return FALSE;\n    }\n\n    calculate_sha1 (sha1, (const char *)decompressed, outlen);\n    rawdata_to_hex (sha1, hex, 20);\n\n    g_free (decompressed);\n    return (strcmp(hex, obj_id) == 0);\n}\n\nstatic gboolean\nverify_seafdir (const char *dir_id, 
uint8_t *data, int len,\n                gboolean verify_id, gboolean is_json)\n{\n    if (is_json)\n        return verify_fs_object_json (dir_id, data, len);\n    else\n        return verify_seafdir_v0 (dir_id, data, len, verify_id);\n}\n                                        \ngboolean\nseaf_fs_manager_verify_seafdir (SeafFSManager *mgr,\n                                const char *repo_id,\n                                int version,\n                                const char *dir_id,\n                                gboolean verify_id,\n                                gboolean *io_error)\n{\n    void *data;\n    int len;\n\n    if (memcmp (dir_id, EMPTY_SHA1, 40) == 0) {\n        return TRUE;\n    }\n\n    if (seaf_obj_store_read_obj (mgr->obj_store, repo_id, version,\n                                 dir_id, &data, &len) < 0) {\n        seaf_warning (\"[fs mgr] Failed to read dir %s:%s.\\n\", repo_id, dir_id);\n        *io_error = TRUE;\n        return FALSE;\n    }\n\n    gboolean ret = verify_seafdir (dir_id, data, len, verify_id, (version > 0));\n    g_free (data);\n\n    return ret;\n}\n\nstatic gboolean\nverify_seafile_v0 (const char *id, const void *data, int len, gboolean verify_id)\n{\n    const SeafileOndisk *ondisk = data;\n    SHA_CTX ctx;\n    uint8_t sha1[20];\n    char check_id[41];\n\n    if (len < sizeof(SeafileOndisk)) {\n        seaf_warning (\"[fs mgr] Corrupt seafile object %s.\\n\", id);\n        return FALSE;\n    }\n\n    if (ntohl(ondisk->type) != SEAF_METADATA_TYPE_FILE) {\n        seaf_warning (\"[fs mgr] %s is not a file.\\n\", id);\n        return FALSE;\n    }\n\n    int id_list_length = len - sizeof(SeafileOndisk);\n    if (id_list_length % 20 != 0) {\n        seaf_warning (\"[fs mgr] Bad seafile id list length %d.\\n\", id_list_length);\n        return FALSE;\n    }\n\n    if (!verify_id)\n        return TRUE;\n\n    SHA1_Init (&ctx);\n    SHA1_Update (&ctx, ondisk->block_ids, len - sizeof(SeafileOndisk));\n    SHA1_Final 
(sha1, &ctx);\n\n    rawdata_to_hex (sha1, check_id, 20);\n\n    if (strcmp (check_id, id) == 0)\n        return TRUE;\n    else\n        return FALSE;\n}\n\nstatic gboolean\nverify_seafile (const char *id, void *data, int len,\n                gboolean verify_id, gboolean is_json)\n{\n    if (is_json)\n        return verify_fs_object_json (id, data, len);\n    else\n        return verify_seafile_v0 (id, data, len, verify_id);\n}\n\ngboolean\nseaf_fs_manager_verify_seafile (SeafFSManager *mgr,\n                                const char *repo_id,\n                                int version,\n                                const char *file_id,\n                                gboolean verify_id,\n                                gboolean *io_error)\n{\n    void *data;\n    int len;\n\n    if (memcmp (file_id, EMPTY_SHA1, 40) == 0) {\n        return TRUE;\n    }\n\n    if (seaf_obj_store_read_obj (mgr->obj_store, repo_id, version,\n                                 file_id, &data, &len) < 0) {\n        seaf_warning (\"[fs mgr] Failed to read file %s:%s.\\n\", repo_id, file_id);\n        *io_error = TRUE;\n        return FALSE;\n    }\n\n    gboolean ret = verify_seafile (file_id, data, len, verify_id, (version > 0));\n    g_free (data);\n\n    return ret;\n}\n\nstatic gboolean\nverify_fs_object_v0 (const char *obj_id,\n                     uint8_t *data,\n                     int len,\n                     gboolean verify_id)\n{\n    gboolean ret = TRUE;\n\n    int type = seaf_metadata_type_from_data (obj_id, data, len, FALSE);\n    switch (type) {\n    case SEAF_METADATA_TYPE_FILE:\n        ret = verify_seafile_v0 (obj_id, data, len, verify_id);\n        break;\n    case SEAF_METADATA_TYPE_DIR:\n        ret = verify_seafdir_v0 (obj_id, data, len, verify_id);\n        break;\n    default:\n        seaf_warning (\"Invalid meta data type: %d.\\n\", type);\n        return FALSE;\n    }\n\n    return ret;\n}\n\ngboolean\nseaf_fs_manager_verify_object (SeafFSManager 
*mgr,\n                               const char *repo_id,\n                               int version,\n                               const char *obj_id,\n                               gboolean verify_id,\n                               gboolean *io_error)\n{\n    void *data;\n    int len;\n    gboolean ret = TRUE;\n\n    if (memcmp (obj_id, EMPTY_SHA1, 40) == 0) {\n        return TRUE;\n    }\n\n    if (seaf_obj_store_read_obj (mgr->obj_store, repo_id, version,\n                                 obj_id, &data, &len) < 0) {\n        seaf_warning (\"[fs mgr] Failed to read object %s:%s.\\n\", repo_id, obj_id);\n        *io_error = TRUE;\n        return FALSE;\n    }\n\n    if (version == 0)\n        ret = verify_fs_object_v0 (obj_id, data, len, verify_id);\n    else\n        ret = verify_fs_object_json (obj_id, data, len);\n\n    g_free (data);\n    return ret;\n}\n\nint\ndir_version_from_repo_version (int repo_version)\n{\n    if (repo_version == 0)\n        return 0;\n    else\n        return CURRENT_DIR_OBJ_VERSION;\n}\n\nint\nseafile_version_from_repo_version (int repo_version)\n{\n    if (repo_version == 0)\n        return 0;\n    else\n        return CURRENT_SEAFILE_OBJ_VERSION;\n}\n\nint\nseaf_fs_manager_remove_store (SeafFSManager *mgr,\n                              const char *store_id)\n{\n    return seaf_obj_store_remove_store (mgr->obj_store, store_id);\n}\n\nGObject *\nseaf_fs_manager_get_file_count_info_by_path (SeafFSManager *mgr,\n                                             const char *repo_id,\n                                             int version,\n                                             const char *root_id,\n                                             const char *path,\n                                             GError **error)\n{\n    char *dir_id = NULL;\n    gint64 file_count = 0, dir_count = 0, size = 0;\n    SeafileFileCountInfo *info = NULL;\n\n    dir_id = seaf_fs_manager_get_seafdir_id_by_path (mgr,\n                         
                            repo_id,\n                                                     version,\n                                                     root_id,\n                                                     path, NULL);\n    if (!dir_id) {\n        seaf_warning (\"Path %s doesn't exist or is not a dir in repo %.10s.\\n\",\n                      path, repo_id);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Bad path\");\n        goto out;\n    }\n    if (get_file_count_info (mgr, repo_id, version,\n                             dir_id, &dir_count, &file_count, &size) < 0) {\n        seaf_warning (\"Failed to get count info from path %s in repo %.10s.\\n\",\n                      path, repo_id);\n        goto out;\n    }\n    info = g_object_new (SEAFILE_TYPE_FILE_COUNT_INFO,\n                         \"file_count\", file_count,\n                         \"dir_count\", dir_count,\n                         \"size\", size, NULL);\nout:\n    g_free (dir_id);\n\n    return (GObject *)info;\n}\n\nstatic int\nsearch_files_recursive (SeafFSManager *mgr,\n                        const char *repo_id,\n                        const char *path,\n                        const char *id,\n                        const char *str,\n                        int version,\n                        GList **file_list)\n{\n    SeafDir *dir;\n    GList *p;\n    SeafDirent *seaf_dent;\n    int ret = 0;\n    char *full_path = NULL;\n\n    dir = seaf_fs_manager_get_seafdir (mgr, repo_id, version, id);\n    if (!dir) {\n        seaf_warning (\"[fs-mgr]get seafdir %s failed\\n\", id);\n        return -1;\n    }\n\n    for (p = dir->entries; p; p = p->next) {\n        seaf_dent = (SeafDirent *)p->data;\n        full_path = g_strconcat (path, \"/\", seaf_dent->name, NULL);\n\n        if (seaf_dent->name && strcasestr (seaf_dent->name, str) != NULL) {\n            SearchResult *sr = g_new0(SearchResult, 1);\n            sr->path = g_strdup (full_path);\n            
sr->size = seaf_dent->size;\n            sr->mtime = seaf_dent->mtime;\n            *file_list = g_list_prepend (*file_list, sr);\n            if (S_ISDIR(seaf_dent->mode)) {\n                sr->is_dir = TRUE;\n            }\n        }\n\n        if (S_ISDIR(seaf_dent->mode)) {\n            if (search_files_recursive (mgr, repo_id, full_path,\n                                        seaf_dent->id, str,\n                                        version, file_list) < 0) {\n                g_free (full_path);\n                ret = -1;\n                break;\n            }\n        }\n\n        g_free (full_path);\n    }\n\n    seaf_dir_free (dir);\n    return ret;\n}\n\nGList *\nseaf_fs_manager_search_files_by_path  (SeafFSManager *mgr,\n                                       const char *repo_id,\n                                       const char *path,\n                                       const char *str)\n{\n    GList *file_list = NULL;\n    SeafCommit *head = NULL;\n\n    SeafRepo *repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);\n    if (!repo) {\n        seaf_warning (\"Failed to find repo %s\\n\", repo_id);\n        goto out;\n    }\n\n    head = seaf_commit_manager_get_commit (seaf->commit_mgr,repo->id, repo->version, repo->head->commit_id);\n    if (!head) {\n        seaf_warning (\"Failed to find commit %s\\n\", repo->head->commit_id);\n        goto out;\n    }\n\n    if (!path || g_strcmp0 (path, \"/\") == 0) {\n        search_files_recursive (mgr, repo->store_id, \"\", head->root_id,\n                                str, repo->version, &file_list);\n    } else {\n        char *dir_id = seaf_fs_manager_get_seafdir_id_by_path (mgr, repo->store_id, repo->version,\n                                                               head->root_id, path, NULL);\n        if (!dir_id) {\n            seaf_warning (\"Path %s doesn't exist or is not a dir in repo %.10s.\\n\", path, repo->store_id);\n            goto out;\n        }\n        
search_files_recursive (mgr, repo->store_id, path, dir_id,\n                                str, repo->version, &file_list);\n        g_free (dir_id);\n    }\n\nout:\n    seaf_repo_unref (repo);\n    seaf_commit_unref (head);\n    return file_list;\n}\n"
  },
  {
    "path": "common/fs-mgr.h",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#ifndef SEAF_FILE_MGR_H\n#define SEAF_FILE_MGR_H\n\n#include <glib.h>\n\n#include \"seafile-object.h\"\n\n#include \"obj-store.h\"\n\n#include \"cdc/cdc.h\"\n#include \"../common/seafile-crypt.h\"\n\n#define CURRENT_DIR_OBJ_VERSION 1\n#define CURRENT_SEAFILE_OBJ_VERSION 1\n\ntypedef struct _SeafFSManager SeafFSManager;\ntypedef struct _SeafFSObject SeafFSObject;\ntypedef struct _Seafile Seafile;\ntypedef struct _SeafDir SeafDir;\ntypedef struct _SeafDirent SeafDirent;\n\ntypedef enum {\n    SEAF_METADATA_TYPE_INVALID,\n    SEAF_METADATA_TYPE_FILE,\n    SEAF_METADATA_TYPE_LINK,\n    SEAF_METADATA_TYPE_DIR,\n} SeafMetadataType;\n\n/* Common to seafile and seafdir objects. */\nstruct _SeafFSObject {\n    int type;\n};\n\nstruct _Seafile {\n    SeafFSObject object;\n    int         version;\n    char        file_id[41];\n    guint64     file_size;\n    guint32     n_blocks;\n    char        **blk_sha1s;\n    int         ref_count;\n};\n\ntypedef struct SearchResult {\n    char *path;\n    gint64 size;\n    gint64 mtime;\n    gboolean is_dir;\n} SearchResult;\n\nvoid\nseafile_ref (Seafile *seafile);\n\nvoid\nseafile_unref (Seafile *seafile);\n\nint\nseafile_save (SeafFSManager *fs_mgr,\n              const char *repo_id,\n              int version,\n              Seafile *file);\n\n#define SEAF_DIR_NAME_LEN 256\n\nstruct _SeafDirent {\n    int        version;\n    guint32    mode;\n    char       id[41];\n    guint32    name_len;\n    char       *name;\n\n    /* attributes for version > 0 */\n    gint64     mtime;\n    char       *modifier;       /* for files only */\n    gint64     size;            /* for files only */\n};\n\nstruct _SeafDir {\n    SeafFSObject object;\n    int    version;\n    char   dir_id[41];\n    GList *entries;\n\n    /* data in on-disk format. 
*/\n    void  *ondisk;\n    int    ondisk_size;\n};\n\nSeafDir *\nseaf_dir_new (const char *id, GList *entries, int version);\n\nvoid \nseaf_dir_free (SeafDir *dir);\n\nSeafDir *\nseaf_dir_from_data (const char *dir_id, uint8_t *data, int len,\n                    gboolean is_json);\n\nvoid *\nseaf_dir_to_data (SeafDir *dir, int *len);\n\nint \nseaf_dir_save (SeafFSManager *fs_mgr,\n               const char *repo_id,\n               int version,\n               SeafDir *dir);\n\nSeafDirent *\nseaf_dirent_new (int version, const char *sha1, int mode, const char *name,\n                 gint64 mtime, const char *modifier, gint64 size);\n\nvoid\nseaf_dirent_free (SeafDirent *dent);\n\nSeafDirent *\nseaf_dirent_dup (SeafDirent *dent);\n\nint\nseaf_metadata_type_from_data (const char *obj_id,\n                              uint8_t *data, int len, gboolean is_json);\n\n/* Parse an fs object without knowing its type. */\nSeafFSObject *\nseaf_fs_object_from_data (const char *obj_id,\n                          uint8_t *data, int len,\n                          gboolean is_json);\n\nvoid\nseaf_fs_object_free (SeafFSObject *obj);\n\ntypedef struct {\n    /* TODO: GHashTable may be inefficient when we have large number of IDs. 
*/\n    GHashTable  *block_hash;\n    GPtrArray   *block_ids;\n    uint32_t     n_blocks;\n    uint32_t     n_valid_blocks;\n} BlockList;\n\nBlockList *\nblock_list_new ();\n\nvoid\nblock_list_free (BlockList *bl);\n\nvoid\nblock_list_insert (BlockList *bl, const char *block_id);\n\n/* Return a blocklist containing block ids which are in @bl1 but\n * not in @bl2.\n */\nBlockList *\nblock_list_difference (BlockList *bl1, BlockList *bl2);\n\nstruct _SeafileSession;\n\ntypedef struct _SeafFSManagerPriv SeafFSManagerPriv;\n\nstruct _SeafFSManager {\n    struct _SeafileSession *seaf;\n\n    struct SeafObjStore *obj_store;\n\n    SeafFSManagerPriv *priv;\n};\n\nSeafFSManager *\nseaf_fs_manager_new (struct _SeafileSession *seaf,\n                     const char *seaf_dir);\n\nint\nseaf_fs_manager_init (SeafFSManager *mgr);\n\n#ifndef SEAFILE_SERVER\n\nint \nseaf_fs_manager_checkout_file (SeafFSManager *mgr, \n                               const char *repo_id,\n                               int version,\n                               const char *file_id, \n                               const char *file_path,\n                               guint32 mode,\n                               guint64 mtime,\n                               struct SeafileCrypt *crypt,\n                               const char *in_repo_path,\n                               const char *conflict_head_id,\n                               gboolean force_conflict,\n                               gboolean *conflicted,\n                               const char *email);\n\n#endif  /* not SEAFILE_SERVER */\n\n/**\n * Check in blocks and create seafile/symlink object.\n * Returns sha1 id for the seafile/symlink object in @sha1 parameter.\n */\nint\nseaf_fs_manager_index_file_blocks (SeafFSManager *mgr,\n                                   const char *repo_id,\n                                   int version,\n                                   GList *paths,\n                                   GList 
*blockids,\n                                   unsigned char sha1[],\n                                   gint64 file_size);\n\nint\nseaf_fs_manager_index_raw_blocks (SeafFSManager *mgr,\n                                  const char *repo_id,\n                                  int version,\n                                  GList *paths,\n                                  GList *blockids);\n\nint\nseaf_fs_manager_index_existed_file_blocks (SeafFSManager *mgr,\n                                           const char *repo_id,\n                                           int version,\n                                           GList *blockids,\n                                           unsigned char sha1[],\n                                           gint64 file_size);\nint\nseaf_fs_manager_index_blocks (SeafFSManager *mgr,\n                              const char *repo_id,\n                              int version,\n                              const char *file_path,\n                              unsigned char sha1[],\n                              gint64 *size,\n                              SeafileCrypt *crypt,\n                              gboolean write_data,\n                              gboolean use_cdc,\n                              gint64 *indexed);\n\nSeafile *\nseaf_fs_manager_get_seafile (SeafFSManager *mgr,\n                             const char *repo_id,\n                             int version,\n                             const char *file_id);\n\nSeafDir *\nseaf_fs_manager_get_seafdir (SeafFSManager *mgr,\n                             const char *repo_id,\n                             int version,\n                             const char *dir_id);\n\n/* Make sure entries in the returned dir is sorted in descending order.\n */\nSeafDir *\nseaf_fs_manager_get_seafdir_sorted (SeafFSManager *mgr,\n                                    const char *repo_id,\n                                    int version,\n                                    const 
char *dir_id);\n\nSeafDir *\nseaf_fs_manager_get_seafdir_sorted_by_path (SeafFSManager *mgr,\n                                            const char *repo_id,\n                                            int version,\n                                            const char *root_id,\n                                            const char *path);\n\nint\nseaf_fs_manager_populate_blocklist (SeafFSManager *mgr,\n                                    const char *repo_id,\n                                    int version,\n                                    const char *root_id,\n                                    BlockList *bl);\n\n/*\n * For dir object, set *stop to TRUE to stop traversing the subtree.\n */\ntypedef gboolean (*TraverseFSTreeCallback) (SeafFSManager *mgr,\n                                            const char *repo_id,\n                                            int version,\n                                            const char *obj_id,\n                                            int type,\n                                            void *user_data,\n                                            gboolean *stop);\n\nint\nseaf_fs_manager_traverse_tree (SeafFSManager *mgr,\n                               const char *repo_id,\n                               int version,\n                               const char *root_id,\n                               TraverseFSTreeCallback callback,\n                               void *user_data,\n                               gboolean skip_errors);\n\ntypedef gboolean (*TraverseFSPathCallback) (SeafFSManager *mgr,\n                                            const char *path,\n                                            SeafDirent *dent,\n                                            void *user_data,\n                                            gboolean *stop);\n\nint\nseaf_fs_manager_traverse_path (SeafFSManager *mgr,\n                               const char *repo_id,\n                               int version,\n  
                             const char *root_id,\n                               const char *dir_path,\n                               TraverseFSPathCallback callback,\n                               void *user_data);\n\ngboolean\nseaf_fs_manager_object_exists (SeafFSManager *mgr,\n                               const char *repo_id,\n                               int version,\n                               const char *id);\n\nvoid\nseaf_fs_manager_delete_object (SeafFSManager *mgr,\n                               const char *repo_id,\n                               int version,\n                               const char *id);\n\ngint64\nseaf_fs_manager_get_file_size (SeafFSManager *mgr,\n                               const char *repo_id,\n                               int version,\n                               const char *file_id);\n\ngint64\nseaf_fs_manager_get_fs_size (SeafFSManager *mgr,\n                             const char *repo_id,\n                             int version,\n                             const char *root_id);\n\n#ifndef SEAFILE_SERVER\nint\nseafile_write_chunk (const char *repo_id,\n                     int version,\n                     CDCDescriptor *chunk,\n                     SeafileCrypt *crypt,\n                     uint8_t *checksum,\n                     gboolean write_data);\nint\nseafile_check_write_chunk (CDCDescriptor *chunk,\n                           uint8_t *sha1,\n                           gboolean write_data);\n#endif /* SEAFILE_SERVER */\n\nuint32_t\ncalculate_chunk_size (uint64_t total_size);\n\nint\nseaf_fs_manager_count_fs_files (SeafFSManager *mgr,\n                                const char *repo_id,\n                                int version,\n                                const char *root_id);\n\nSeafDir *\nseaf_fs_manager_get_seafdir_by_path(SeafFSManager *mgr,\n                                    const char *repo_id,\n                                    int version,\n                                  
  const char *root_id,\n                                    const char *path,\n                                    GError **error);\nchar *\nseaf_fs_manager_get_seafile_id_by_path (SeafFSManager *mgr,\n                                        const char *repo_id,\n                                        int version,\n                                        const char *root_id,\n                                        const char *path,\n                                        GError **error);\n\nchar *\nseaf_fs_manager_path_to_obj_id (SeafFSManager *mgr,\n                                const char *repo_id,\n                                int version,\n                                const char *root_id,\n                                const char *path,\n                                guint32 *mode,\n                                GError **error);\n\nchar *\nseaf_fs_manager_get_seafdir_id_by_path (SeafFSManager *mgr,\n                                        const char *repo_id,\n                                        int version,\n                                        const char *root_id,\n                                        const char *path,\n                                        GError **error);\n\nSeafDirent *\nseaf_fs_manager_get_dirent_by_path (SeafFSManager *mgr,\n                                    const char *repo_id,\n                                    int version,\n                                    const char *root_id,\n                                    const char *path,\n                                    GError **error);\n\n/* Check object integrity. 
*/\n\ngboolean\nseaf_fs_manager_verify_seafdir (SeafFSManager *mgr,\n                                const char *repo_id,\n                                int version,\n                                const char *dir_id,\n                                gboolean verify_id,\n                                gboolean *io_error);\n\ngboolean\nseaf_fs_manager_verify_seafile (SeafFSManager *mgr,\n                                const char *repo_id,\n                                int version,\n                                const char *file_id,\n                                gboolean verify_id,\n                                gboolean *io_error);\n\ngboolean\nseaf_fs_manager_verify_object (SeafFSManager *mgr,\n                               const char *repo_id,\n                               int version,\n                               const char *obj_id,\n                               gboolean verify_id,\n                               gboolean *io_error);\n\nint\ndir_version_from_repo_version (int repo_version);\n\nint\nseafile_version_from_repo_version (int repo_version);\n\nstruct _CDCFileDescriptor;\nvoid\nseaf_fs_manager_calculate_seafile_id_json (int repo_version,\n                                           struct _CDCFileDescriptor *cdc,\n                                           guint8 *file_id_sha1);\n\nint\nseaf_fs_manager_remove_store (SeafFSManager *mgr,\n                              const char *store_id);\n\nGObject *\nseaf_fs_manager_get_file_count_info_by_path (SeafFSManager *mgr,\n                                             const char *repo_id,\n                                             int version,\n                                             const char *root_id,\n                                             const char *path,\n                                             GError **error);\n\nGList *\nseaf_fs_manager_search_files_by_path (SeafFSManager *mgr,\n                                      const char *repo_id,\n                        
              const char *path,\n                                      const char *str);\n\n#endif\n"
  },
  {
    "path": "common/group-mgr.c",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#include \"common.h\"\n\n#include \"seafile-session.h\"\n#include \"seaf-db.h\"\n#include \"group-mgr.h\"\n#include \"org-mgr.h\"\n#include \"seaf-utils.h\"\n\n#include \"utils.h\"\n#include \"log.h\"\n\n#define DEFAULT_MAX_CONNECTIONS 100\n\nstruct _CcnetGroupManagerPriv {\n    CcnetDB\t*db;\n    const char *table_name;\n};\n\nstatic int open_db (CcnetGroupManager *manager);\nstatic int check_db_table (CcnetGroupManager *manager, CcnetDB *db);\n\nCcnetGroupManager* ccnet_group_manager_new (SeafileSession *session)\n{\n    CcnetGroupManager *manager = g_new0 (CcnetGroupManager, 1);\n\n    manager->session = session;\n    manager->priv = g_new0 (CcnetGroupManagerPriv, 1);\n\n    return manager;\n}\n\nint\nccnet_group_manager_init (CcnetGroupManager *manager)\n{\n    return 0;\n}\n\nint\nccnet_group_manager_prepare (CcnetGroupManager *manager)\n{\n    const char *table_name = g_getenv(\"SEAFILE_MYSQL_DB_GROUP_TABLE_NAME\");\n    if (!table_name || g_strcmp0 (table_name, \"\") == 0)\n        manager->priv->table_name = g_strdup (\"Group\");\n    else\n        manager->priv->table_name = g_strdup (table_name);\n\n    return open_db(manager);\n}\n\nvoid ccnet_group_manager_start (CcnetGroupManager *manager)\n{\n}\n\nstatic CcnetDB *\nopen_sqlite_db (CcnetGroupManager *manager)\n{\n    CcnetDB *db = NULL;\n    char *db_dir;\n    char *db_path;\n\n    db_dir = g_build_filename (manager->session->ccnet_dir, \"GroupMgr\", NULL);\n    if (checkdir_with_mkdir(db_dir) < 0) {\n        ccnet_error (\"Cannot open db dir %s: %s\\n\", db_dir,\n                     strerror(errno));\n        g_free (db_dir);\n        return NULL;\n    }\n    g_free (db_dir);\n\n    db_path = g_build_filename (manager->session->ccnet_dir, \"GroupMgr\",\n                                \"groupmgr.db\", NULL);\n    db = seaf_db_new_sqlite (db_path, DEFAULT_MAX_CONNECTIONS);\n\n    g_free (db_path);\n\n    
return db;\n}\n\nstatic int\nopen_db (CcnetGroupManager *manager)\n{\n    CcnetDB *db = NULL;\n\n    switch (seaf_db_type(manager->session->ccnet_db)) {\n    case SEAF_DB_TYPE_SQLITE:\n        db = open_sqlite_db (manager);\n        break;\n    case SEAF_DB_TYPE_PGSQL:\n    case SEAF_DB_TYPE_MYSQL:\n        db = manager->session->ccnet_db;\n        break;\n    }\n\n    if (!db)\n        return -1;\n    \n    manager->priv->db = db;\n    if ((manager->session->ccnet_create_tables || seaf_db_type(db) == SEAF_DB_TYPE_PGSQL)\n        && check_db_table (manager, db) < 0) {\n        ccnet_warning (\"Failed to create group db tables.\\n\");\n        return -1;\n    }\n\n    return 0;\n}\n\n/* -------- Group Database Management ---------------- */\n\nstatic int check_db_table (CcnetGroupManager *manager, CcnetDB *db)\n{\n    char *sql;\n    GString *group_sql = g_string_new (\"\");\n    const char *table_name = manager->priv->table_name;\n\n    int db_type = seaf_db_type (db);\n    if (db_type == SEAF_DB_TYPE_MYSQL) {\n        g_string_printf (group_sql,\n            \"CREATE TABLE IF NOT EXISTS `%s` (`group_id` BIGINT \"\n            \" PRIMARY KEY AUTO_INCREMENT, `group_name` VARCHAR(255),\"\n            \" `creator_name` VARCHAR(255), `timestamp` BIGINT,\"\n            \" `type` VARCHAR(32), `parent_group_id` INTEGER)\"\n            \"ENGINE=INNODB\", table_name);\n        if (seaf_db_query (db, group_sql->str) < 0) {\n            g_string_free (group_sql, TRUE);\n            return -1;\n        }\n\n        sql = \"CREATE TABLE IF NOT EXISTS `GroupUser` ( \"\n            \"`id` BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, `group_id` BIGINT,\"\n            \" `user_name` VARCHAR(255), `is_staff` tinyint, UNIQUE INDEX\"\n            \" (`group_id`, `user_name`), INDEX (`user_name`))\"\n            \"ENGINE=INNODB\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n        sql = \"CREATE TABLE IF NOT EXISTS GroupDNPair ( \"\n            \"id BIGINT NOT 
NULL PRIMARY KEY AUTO_INCREMENT, group_id INTEGER,\"\n            \" dn VARCHAR(255))ENGINE=INNODB\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n        sql = \"CREATE TABLE IF NOT EXISTS GroupStructure ( \"\n              \"id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, group_id INTEGER, \"\n              \"path VARCHAR(1024), UNIQUE INDEX(group_id))ENGINE=INNODB\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n    } else if (db_type == SEAF_DB_TYPE_SQLITE) {\n        g_string_printf (group_sql,\n            \"CREATE TABLE IF NOT EXISTS `%s` (`group_id` INTEGER\"\n            \" PRIMARY KEY AUTOINCREMENT, `group_name` VARCHAR(255),\"\n            \" `creator_name` VARCHAR(255), `timestamp` BIGINT,\"\n            \" `type` VARCHAR(32), `parent_group_id` INTEGER)\", table_name);\n        if (seaf_db_query (db, group_sql->str) < 0) {\n            g_string_free (group_sql, TRUE);\n            return -1;\n        }\n\n        sql = \"CREATE TABLE IF NOT EXISTS `GroupUser` (`group_id` INTEGER, \"\n            \"`user_name` VARCHAR(255), `is_staff` tinyint)\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n        sql = \"CREATE UNIQUE INDEX IF NOT EXISTS groupid_username_indx on \"\n            \"`GroupUser` (`group_id`, `user_name`)\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n        sql = \"CREATE INDEX IF NOT EXISTS username_indx on \"\n            \"`GroupUser` (`user_name`)\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n        sql = \"CREATE TABLE IF NOT EXISTS GroupDNPair (group_id INTEGER,\"\n            \" dn VARCHAR(255))\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n        sql = \"CREATE TABLE IF NOT EXISTS GroupStructure (group_id INTEGER PRIMARY KEY, \"\n              \"path VARCHAR(1024))\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n        sql = \"CREATE INDEX IF NOT EXISTS 
path_indx on \"\n            \"`GroupStructure` (`path`)\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n    } else if (db_type == SEAF_DB_TYPE_PGSQL) {\n        g_string_printf (group_sql,\n            \"CREATE TABLE IF NOT EXISTS \\\"%s\\\" (group_id SERIAL\"\n            \" PRIMARY KEY, group_name VARCHAR(255),\"\n            \" creator_name VARCHAR(255), timestamp BIGINT,\"\n            \" type VARCHAR(32), parent_group_id INTEGER)\", table_name);\n        if (seaf_db_query (db, group_sql->str) < 0) {\n            g_string_free (group_sql, TRUE);\n            return -1;\n        }\n\n        sql = \"CREATE TABLE IF NOT EXISTS GroupUser (group_id INTEGER,\"\n            \" user_name VARCHAR(255), is_staff smallint, UNIQUE \"\n            \" (group_id, user_name))\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n        //if (!pgsql_index_exists (db, \"groupuser_username_idx\")) {\n        //    sql = \"CREATE INDEX groupuser_username_idx ON GroupUser (user_name)\";\n        //    if (seaf_db_query (db, sql) < 0)\n        //        return -1;\n        //}\n\n        sql = \"CREATE TABLE IF NOT EXISTS GroupDNPair (group_id INTEGER,\"\n            \" dn VARCHAR(255))\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n        sql = \"CREATE TABLE IF NOT EXISTS GroupStructure (group_id INTEGER PRIMARY KEY, \"\n              \"path VARCHAR(1024))\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n        //if (!pgsql_index_exists (db, \"structure_path_idx\")) {\n        //    sql = \"CREATE INDEX structure_path_idx ON GroupStructure (path)\";\n        //    if (seaf_db_query (db, sql) < 0)\n        //        return -1;\n        //}\n\n    }\n    g_string_free (group_sql, TRUE);\n\n    return 0;\n}\n\nstatic gboolean\nget_group_id_cb (CcnetDBRow *row, void *data)\n{\n    int *id = data;\n    int group_id = seaf_db_row_get_column_int(row, 0);\n    *id = group_id;\n\n    return 
FALSE;\n}\n\nstatic gboolean\nget_group_path_cb (CcnetDBRow *row, void *data)\n{\n    char **path = (char **)data;\n    const char *group_path = seaf_db_row_get_column_text (row, 0);\n    *path = g_strdup (group_path);\n\n    return FALSE;\n}\n\nstatic int\ncreate_group_common (CcnetGroupManager *mgr,\n                     const char *group_name,\n                     const char *user_name,\n                     int parent_group_id,\n                     GError **error)\n{\n    CcnetDB *db = mgr->priv->db;\n    gint64 now = get_current_time();\n    GString *sql = g_string_new (\"\");\n    const char *table_name = mgr->priv->table_name;\n    int group_id = -1;\n    CcnetDBTrans *trans = seaf_db_begin_transaction (db);\n\n    char *user_name_l = g_ascii_strdown (user_name, -1);\n    \n    if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL)\n        g_string_printf (sql,\n            \"INSERT INTO \\\"%s\\\"(group_name, \"\n            \"creator_name, timestamp, parent_group_id) VALUES(?, ?, ?, ?)\", table_name);\n    else\n        g_string_printf (sql,\n            \"INSERT INTO `%s`(group_name, \"\n            \"creator_name, timestamp, parent_group_id) VALUES(?, ?, ?, ?)\", table_name);\n\n    if (seaf_db_trans_query (trans, sql->str, 4,\n                              \"string\", group_name, \"string\", user_name_l,\n                              \"int64\", now, \"int\", parent_group_id) < 0)\n        goto error;\n\n    if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL)\n        g_string_printf (sql,\n            \"SELECT group_id FROM \\\"%s\\\" WHERE \"\n            \"group_name = ? AND creator_name = ? \"\n            \"AND timestamp = ?\", table_name);\n    else\n        g_string_printf (sql,\n            \"SELECT group_id FROM `%s` WHERE \"\n            \"group_name = ? AND creator_name = ? 
\"\n            \"AND timestamp = ?\", table_name);\n\n    seaf_db_trans_foreach_selected_row (trans, sql->str, get_group_id_cb,\n                                         &group_id, 3, \"string\", group_name,\n                                         \"string\", user_name_l, \"int64\", now);\n\n    if (group_id < 0)\n        goto error;\n\n    if (g_strcmp0(user_name, \"system admin\") != 0) {\n        g_string_printf (sql, \"INSERT INTO GroupUser (group_id, user_name, is_staff) VALUES (?, ?, ?)\");\n\n        if (seaf_db_trans_query (trans, sql->str, 3,\n                                  \"int\", group_id, \"string\", user_name_l,\n                                  \"int\", 1) < 0)\n            goto error;\n    }\n\n    if (parent_group_id == -1) {\n        g_string_printf (sql, \"INSERT INTO GroupStructure (group_id, path) VALUES (?,'%d')\", group_id);\n        if (seaf_db_trans_query (trans, sql->str, 1, \"int\", group_id) < 0)\n            goto error;\n    } else if (parent_group_id > 0) {\n        g_string_printf (sql, \"SELECT path FROM GroupStructure WHERE group_id=?\");\n        char *path = NULL;\n        seaf_db_trans_foreach_selected_row (trans, sql->str, get_group_path_cb,\n                                             &path, 1, \"int\", parent_group_id);\n        if (!path)\n            goto error;\n        g_string_printf (sql, \"INSERT INTO GroupStructure (group_id, path) VALUES (?, '%s, %d')\", path, group_id);\n        if (seaf_db_trans_query (trans, sql->str, 1, \"int\", group_id) < 0) {\n            g_free (path);\n            goto error;\n        }\n        g_free (path);\n    }\n\n    seaf_db_commit (trans);\n    seaf_db_trans_close (trans);\n    g_string_free (sql, TRUE);\n    g_free (user_name_l);\n    return group_id;\n\nerror:\n    seaf_db_rollback (trans);\n    seaf_db_trans_close (trans);\n    g_set_error (error, CCNET_DOMAIN, 0, \"Failed to create group\");\n    g_string_free (sql, TRUE);\n    g_free (user_name_l);\n    return 
-1;\n}\n\nint ccnet_group_manager_create_group (CcnetGroupManager *mgr,\n                                      const char *group_name,\n                                      const char *user_name,\n                                      int parent_group_id,\n                                      GError **error)\n{\n    return create_group_common (mgr, group_name, user_name, parent_group_id, error);\n}\n\n/* static gboolean */\n/* duplicate_org_group_name (CcnetGroupManager *mgr, */\n/*                           int org_id, */\n/*                           const char *group_name) */\n/* { */\n/*     GList *org_groups = NULL, *ptr; */\n/*     CcnetOrgManager *org_mgr = seaf->org_mgr; */\n    \n/*     org_groups = ccnet_org_manager_get_org_groups (org_mgr, org_id, -1, -1); */\n/*     if (!org_groups) */\n/*         return FALSE; */\n\n/*     for (ptr = org_groups; ptr; ptr = ptr->next) { */\n/*         int group_id = (int)(long)ptr->data; */\n/*         CcnetGroup *group = ccnet_group_manager_get_group (mgr, group_id, */\n/*                                                            NULL); */\n/*         if (!group) */\n/*             continue; */\n\n/*         if (g_strcmp0 (group_name, ccnet_group_get_group_name(group)) == 0) { */\n/*             g_list_free (org_groups); */\n/*             g_object_unref (group); */\n/*             return TRUE; */\n/*         } else { */\n/*             g_object_unref (group); */\n/*         } */\n/*     } */\n\n/*     g_list_free (org_groups); */\n/*     return FALSE; */\n/* } */\n\nint ccnet_group_manager_create_org_group (CcnetGroupManager *mgr,\n                                          int org_id,\n                                          const char *group_name,\n                                          const char *user_name,\n                                          int parent_group_id,\n                                          GError **error)\n{\n    CcnetOrgManager *org_mgr = seaf->org_mgr;\n    \n    /* if 
(duplicate_org_group_name (mgr, org_id, group_name)) { */\n    /*     g_set_error (error, CCNET_DOMAIN, 0, */\n    /*                  \"The group has already created in this org.\"); */\n    /*     return -1; */\n    /* } */\n\n    int group_id = create_group_common (mgr, group_name, user_name, parent_group_id, error);\n    if (group_id < 0) {\n        g_set_error (error, CCNET_DOMAIN, 0, \"Failed to create org group.\");\n        return -1;\n    }\n\n    if (ccnet_org_manager_add_org_group (org_mgr, org_id, group_id,\n                                         error) < 0) {\n        g_set_error (error, CCNET_DOMAIN, 0, \"Failed to create org group.\");\n        return -1;\n    }\n\n    return group_id;\n}\n\nstatic gboolean\ncheck_group_staff (CcnetDB *db, int group_id, const char *user_name, gboolean in_structure)\n{\n    gboolean exists, err;\n    if (!in_structure) {\n        exists = seaf_db_statement_exists (db, \"SELECT group_id FROM GroupUser WHERE \"\n                                          \"group_id = ? AND user_name = ? AND \"\n                                          \"is_staff = 1\", &err,\n                                          2, \"int\", group_id, \"string\", user_name);\n        if (err) {\n            ccnet_warning (\"DB error when check staff user exist in GroupUser.\\n\");\n            return FALSE;\n        }\n        return exists;\n    }\n\n\n    GString *sql = g_string_new(\"\");\n    g_string_printf (sql, \"SELECT path FROM GroupStructure WHERE group_id=?\");\n    char *path = seaf_db_statement_get_string (db, sql->str, 1, \"int\", group_id);\n\n\n    if (!path) {\n        exists = seaf_db_statement_exists (db, \"SELECT group_id FROM GroupUser WHERE \"\n                                            \"group_id = ? AND user_name = ? 
AND \"\n                                            \"is_staff = 1\", &err,\n                                            2, \"int\", group_id, \"string\", user_name);\n    } else {\n        g_string_printf (sql, \"SELECT group_id FROM GroupUser WHERE \"\n                              \"group_id IN (%s) AND user_name = ? AND \"\n                              \"is_staff = 1\", path);\n        exists = seaf_db_statement_exists (db, sql->str, &err,\n                                            1, \"string\", user_name);\n    }\n    g_string_free (sql, TRUE);\n    g_free (path);\n\n    if (err) {\n        ccnet_warning (\"DB error when check staff user exist in GroupUser.\\n\");\n        return FALSE;\n    }\n\n    return exists;\n}\n\nint ccnet_group_manager_remove_group (CcnetGroupManager *mgr,\n                                      int group_id,\n                                      gboolean remove_anyway,\n                                      GError **error)\n{\n    CcnetDB *db = mgr->priv->db;\n    GString *sql = g_string_new (\"\");\n    gboolean exists, err;\n    const char *table_name = mgr->priv->table_name;\n\n    /* No permission check here, since both group staff and seahub staff\n     * can remove group.\n     */\n     if (remove_anyway != TRUE) {\n        if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL)\n            g_string_printf (sql, \"SELECT 1 FROM \\\"%s\\\" WHERE parent_group_id=?\", table_name);\n        else\n            g_string_printf (sql, \"SELECT 1 FROM `%s` WHERE parent_group_id=?\", table_name);\n        exists = seaf_db_statement_exists (db, sql->str, &err, 1, \"int\", group_id);\n        if (err) {\n            ccnet_warning (\"DB error when check remove group.\\n\");\n            g_string_free (sql, TRUE);\n            return -1;\n        }\n        if (exists) {\n            ccnet_warning (\"Failed to remove group [%d] whose child group must be removed first.\\n\", group_id);\n            g_string_free (sql, TRUE);\n            return -1;\n 
       }\n     }\n    \n    if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL)\n        g_string_printf (sql, \"DELETE FROM \\\"%s\\\" WHERE group_id=?\", table_name);\n    else\n        g_string_printf (sql, \"DELETE FROM `%s` WHERE group_id=?\", table_name);\n    seaf_db_statement_query (db, sql->str, 1, \"int\", group_id);\n\n    g_string_printf (sql, \"DELETE FROM GroupUser WHERE group_id=?\");\n    seaf_db_statement_query (db, sql->str, 1, \"int\", group_id);\n\n    g_string_printf (sql, \"DELETE FROM GroupStructure WHERE group_id=?\");\n    seaf_db_statement_query (db, sql->str, 1, \"int\", group_id);\n\n    g_string_free (sql, TRUE);\n    \n    return 0;\n}\n\nstatic gboolean\ncheck_group_exists (CcnetGroupManager *mgr, CcnetDB *db, int group_id)\n{\n    GString *sql = g_string_new (\"\");\n    const char *table_name = mgr->priv->table_name;\n    gboolean exists, err;\n\n    if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL) {\n        g_string_printf (sql, \"SELECT group_id FROM \\\"%s\\\" WHERE group_id=?\", table_name);\n        exists = seaf_db_statement_exists (db, sql->str, &err, 1, \"int\", group_id);\n    } else {\n        g_string_printf (sql, \"SELECT group_id FROM `%s` WHERE group_id=?\", table_name);\n        exists = seaf_db_statement_exists (db, sql->str, &err, 1, \"int\", group_id);\n    }\n    g_string_free (sql, TRUE);\n\n    if (err) {\n        ccnet_warning (\"DB error when check group exist.\\n\");\n        return FALSE;\n    }\n    return exists;\n}\n\nint ccnet_group_manager_add_member (CcnetGroupManager *mgr,\n                                    int group_id,\n                                    const char *user_name,\n                                    const char *member_name,\n                                    GError **error)\n{\n    CcnetDB *db = mgr->priv->db;\n\n    /* check whether group exists */\n    if (!check_group_exists (mgr, db, group_id)) {\n        g_set_error (error, CCNET_DOMAIN, 0, \"Group not exists\");\n        return -1;\n    
}\n\n    char *member_name_l = g_ascii_strdown (member_name, -1);\n    int rc = seaf_db_statement_query (db, \"INSERT INTO GroupUser (group_id, user_name, is_staff) VALUES (?, ?, ?)\",\n                                       3, \"int\", group_id, \"string\", member_name_l,\n                                       \"int\", 0);\n    g_free (member_name_l);\n    if (rc < 0) {\n        g_set_error (error, CCNET_DOMAIN, 0, \"Failed to add member to group\");\n        return -1;\n    }\n\n    return 0;\n}\n\nint ccnet_group_manager_remove_member (CcnetGroupManager *mgr,\n                                       int group_id,\n                                       const char *user_name,\n                                       const char *member_name,\n                                       GError **error)\n{\n    CcnetDB *db = mgr->priv->db;\n    char *sql;\n\n    /* check whether group exists */\n    if (!check_group_exists (mgr, db, group_id)) {\n        g_set_error (error, CCNET_DOMAIN, 0, \"Group not exists\");\n        return -1;\n    }\n\n    /* can not remove myself */\n    if (g_strcmp0 (user_name, member_name) == 0) {\n        g_set_error (error, CCNET_DOMAIN, 0, \"Can not remove myself\");\n        return -1;\n    }\n\n    sql = \"DELETE FROM GroupUser WHERE group_id=? AND user_name=?\";\n    seaf_db_statement_query (db, sql, 2, \"int\", group_id, \"string\", member_name);\n\n    return 0;\n}\n\nint ccnet_group_manager_set_admin (CcnetGroupManager *mgr,\n                                   int group_id,\n                                   const char *member_name,\n                                   GError **error)\n{\n    CcnetDB *db = mgr->priv->db;\n\n    seaf_db_statement_query (db,\n                              \"UPDATE GroupUser SET is_staff = 1 \"\n                              \"WHERE group_id = ? 
and user_name = ?\",\n                              2, \"int\", group_id, \"string\", member_name);\n\n    return 0;\n}\n\nint ccnet_group_manager_unset_admin (CcnetGroupManager *mgr,\n                                     int group_id,\n                                     const char *member_name,\n                                     GError **error)\n{\n    CcnetDB *db = mgr->priv->db;\n\n    seaf_db_statement_query (db,\n                              \"UPDATE GroupUser SET is_staff = 0 \"\n                              \"WHERE group_id = ? and user_name = ?\",\n                              2, \"int\", group_id, \"string\", member_name);\n\n    return 0;\n}\n\nint ccnet_group_manager_set_group_name (CcnetGroupManager *mgr,\n                                        int group_id,\n                                        const char *group_name,\n                                        GError **error)\n{\n    const char *table_name = mgr->priv->table_name;\n    GString *sql = g_string_new (\"\");\n    CcnetDB *db = mgr->priv->db;\n\n    if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL) {\n        g_string_printf (sql, \"UPDATE \\\"%s\\\" SET group_name = ? \"\n                              \"WHERE group_id = ?\", table_name);\n        seaf_db_statement_query (db, sql->str, 2, \"string\", group_name, \"int\", group_id);\n    } else {\n        g_string_printf (sql, \"UPDATE `%s` SET group_name = ? 
\"\n                              \"WHERE group_id = ?\", table_name);\n        seaf_db_statement_query (db, sql->str, 2, \"string\", group_name, \"int\", group_id);\n    }\n    g_string_free (sql, TRUE);\n\n    return 0;\n}\n\nint ccnet_group_manager_quit_group (CcnetGroupManager *mgr,\n                                    int group_id,\n                                    const char *user_name,\n                                    GError **error)\n{\n    CcnetDB *db = mgr->priv->db;\n    \n    /* check whether group exists */\n    if (!check_group_exists (mgr, db, group_id)) {\n        g_set_error (error, CCNET_DOMAIN, 0, \"Group not exists\");\n        return -1;\n    }\n\n    seaf_db_statement_query (db,\n                              \"DELETE FROM GroupUser WHERE group_id=? \"\n                              \"AND user_name=?\",\n                              2, \"int\", group_id, \"string\", user_name);\n\n    return 0;\n}\n\nstatic gboolean\nget_user_groups_cb (CcnetDBRow *row, void *data)\n{\n    GList **plist = data;\n    CcnetGroup *group;\n\n    int group_id = seaf_db_row_get_column_int (row, 0);\n    const char *group_name = seaf_db_row_get_column_text (row, 1);\n    const char *creator_name = seaf_db_row_get_column_text (row, 2);\n    gint64 ts = seaf_db_row_get_column_int64 (row, 3);\n    int parent_group_id = seaf_db_row_get_column_int (row, 4);\n\n    group = g_object_new (CCNET_TYPE_GROUP,\n                          \"id\", group_id,\n                          \"group_name\", group_name,\n                          \"creator_name\", creator_name,\n                          \"timestamp\", ts,\n                          \"source\", \"DB\",\n                          \"parent_group_id\", parent_group_id,\n                          NULL);\n\n    *plist = g_list_append (*plist, group);\n\n    return TRUE;\n}\n\nGList *\nccnet_group_manager_get_ancestor_groups (CcnetGroupManager *mgr, int group_id)\n{\n    CcnetDB *db = mgr->priv->db;\n    GList *ret = 
NULL;\n    CcnetGroup *group = NULL;\n    GString *sql = g_string_new (\"\");\n    const char *table_name = mgr->priv->table_name;\n\n    g_string_printf (sql, \"SELECT path FROM GroupStructure WHERE group_id=?\");\n\n    char *path = seaf_db_statement_get_string (db, sql->str, 1, \"int\", group_id);\n\n    if (path) {\n        if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL)\n            g_string_printf (sql, \"SELECT g.group_id, group_name, creator_name, timestamp, parent_group_id FROM \"\n                             \"\\\"%s\\\" g WHERE g.group_id IN(%s) \"\n                             \"ORDER BY g.group_id\",\n                             table_name, path);\n        else\n            g_string_printf (sql, \"SELECT g.group_id, group_name, creator_name, timestamp, parent_group_id FROM \"\n                             \"`%s` g WHERE g.group_id IN(%s) \"\n                             \"ORDER BY g.group_id\",\n                             table_name, path);\n\n        if (seaf_db_statement_foreach_row (db, sql->str, get_user_groups_cb, &ret, 0) < 0) {\n            ccnet_warning (\"Failed to get ancestor groups of group %d\\n\", group_id);\n            g_string_free (sql, TRUE);\n            g_free (path);\n            return NULL;\n        }\n        g_string_free (sql, TRUE);\n        g_free (path);\n    } else { // group is not in structure, return itself.\n        group = ccnet_group_manager_get_group (mgr, group_id, NULL);\n        if (group) {\n            ret = g_list_prepend (ret, group);\n        }\n    }\n\n    return ret;\n}\n\nstatic gint\ngroup_comp_func (gconstpointer a, gconstpointer b)\n{\n    CcnetGroup *g1 = (CcnetGroup *)a;\n    CcnetGroup *g2 = (CcnetGroup *)b;\n    int id_1 = 0, id_2 = 0;\n    g_object_get (g1, \"id\", &id_1, NULL);\n    g_object_get (g2, \"id\", &id_2, NULL);\n\n    if (id_1 == id_2)\n        return 0;\n    return id_1 > id_2 ? 
-1 : 1;\n}\n\ngboolean\nget_group_paths_cb (CcnetDBRow *row, void *data)\n{\n    GString *paths = data;\n    const char *path = seaf_db_row_get_column_text (row, 0);\n\n    if (g_strcmp0 (paths->str, \"\") == 0)\n        g_string_append_printf (paths, \"%s\", path);\n    else\n        g_string_append_printf (paths, \", %s\", path);\n\n    return TRUE;\n}\n\nGList *\nccnet_group_manager_get_groups_by_user (CcnetGroupManager *mgr,\n                                        const char *user_name,\n                                        gboolean return_ancestors,\n                                        GError **error)\n{\n    CcnetDB *db = mgr->priv->db;\n    GList *groups = NULL, *ret = NULL;\n    GList *ptr;\n    GString *sql = g_string_new (\"\");\n    const char *table_name = mgr->priv->table_name;\n    CcnetGroup *group;\n    int parent_group_id = 0, group_id = 0;\n\n    if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL)\n        g_string_printf (sql, \n            \"SELECT g.group_id, group_name, creator_name, timestamp, parent_group_id FROM \"\n            \"\\\"%s\\\" g, GroupUser u WHERE g.group_id = u.group_id AND user_name=? ORDER BY g.group_id DESC\",\n            table_name);\n    else\n        g_string_printf (sql,\n            \"SELECT g.group_id, group_name, creator_name, timestamp, parent_group_id FROM \"\n            \"`%s` g, GroupUser u WHERE g.group_id = u.group_id AND user_name=? 
ORDER BY g.group_id DESC\",\n            table_name);\n\n    if (seaf_db_statement_foreach_row (db,\n                                        sql->str,\n                                        get_user_groups_cb,\n                                        &groups,\n                                        1, \"string\", user_name) < 0) {\n        g_string_free (sql, TRUE);\n        return NULL;\n    }\n\n    if (!return_ancestors) {\n        g_string_free (sql, TRUE);\n        return groups;\n    }\n\n    /* Get ancestor groups in descending order by group_id.*/\n    GString *paths = g_string_new (\"\");\n    g_string_erase (sql, 0, -1);\n    for (ptr = groups; ptr; ptr = ptr->next) {\n        group = ptr->data;\n        g_object_get (group, \"parent_group_id\", &parent_group_id, NULL);\n        g_object_get (group, \"id\", &group_id, NULL);\n        if (parent_group_id != 0) {\n            if (g_strcmp0(sql->str, \"\") == 0)\n                g_string_append_printf (sql, \"SELECT path FROM GroupStructure WHERE group_id IN (%d\", group_id);\n            else\n                g_string_append_printf (sql, \", %d\", group_id);\n        } else {\n            g_object_ref (group);\n            ret = g_list_insert_sorted (ret, group, group_comp_func);\n        }\n    }\n    if (g_strcmp0(sql->str, \"\") != 0) {\n        g_string_append_printf (sql, \")\");\n        if (seaf_db_statement_foreach_row (db,\n                                            sql->str,\n                                            get_group_paths_cb,\n                                            paths, 0) < 0) {\n            g_list_free_full (ret, g_object_unref);\n            ret = NULL;\n            goto out;\n        }\n        if (g_strcmp0(paths->str, \"\") == 0) {\n            ccnet_warning (\"Failed to get groups path for user %s\\n\", user_name);\n            g_list_free_full (ret, g_object_unref);\n            ret = NULL;\n            goto out;\n        }\n\n        g_string_printf (sql, \"SELECT 
g.group_id, group_name, creator_name, timestamp, parent_group_id FROM \"\n                         \"`%s` g WHERE g.group_id IN (%s) ORDER BY g.group_id DESC\",\n                         table_name, paths->str);\n        if (seaf_db_statement_foreach_row (db,\n                                        sql->str,\n                                        get_user_groups_cb,\n                                        &ret, 0) < 0) {\n            g_list_free_full (ret, g_object_unref);\n            ret = NULL;\n            goto out;\n        }\n    }\n    ret = g_list_sort (ret, group_comp_func);\n\nout:\n    g_string_free (sql, TRUE);\n    g_list_free_full (groups, g_object_unref);\n    g_string_free (paths, TRUE);\n\n    return ret;\n}\n\nstatic gboolean\nget_ccnetgroup_cb (CcnetDBRow *row, void *data)\n{\n    CcnetGroup **p_group = data;\n    int group_id;\n    const char *group_name;\n    const char *creator;\n    int parent_group_id;\n    gint64 ts;\n    \n    group_id = seaf_db_row_get_column_int (row, 0);\n    group_name = (const char *)seaf_db_row_get_column_text (row, 1);\n    creator = (const char *)seaf_db_row_get_column_text (row, 2);\n    ts = seaf_db_row_get_column_int64 (row, 3);\n    parent_group_id = seaf_db_row_get_column_int (row, 4);\n\n    char *creator_l = g_ascii_strdown (creator, -1);\n    *p_group = g_object_new (CCNET_TYPE_GROUP,\n                             \"id\", group_id,\n                             \"group_name\", group_name,\n                             \"creator_name\", creator_l,\n                             \"timestamp\", ts,\n                             \"source\", \"DB\",\n                             \"parent_group_id\", parent_group_id,\n                             NULL);\n    g_free (creator_l);\n\n    return FALSE;\n}\n\nGList *\nccnet_group_manager_get_child_groups (CcnetGroupManager *mgr, int group_id,\n                                      GError **error)\n{\n    CcnetDB *db = mgr->priv->db;\n    GString *sql = g_string_new 
(\"\");\n    GList *ret = NULL;\n    const char *table_name = mgr->priv->table_name;\n\n    if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL)\n        g_string_printf (sql,\n            \"SELECT group_id, group_name, creator_name, timestamp, parent_group_id FROM \"\n            \"\\\"%s\\\" WHERE parent_group_id=?\", table_name);\n    else\n        g_string_printf (sql,\n            \"SELECT group_id, group_name, creator_name, timestamp, parent_group_id FROM \"\n            \"`%s` WHERE parent_group_id=?\", table_name);\n    if (seaf_db_statement_foreach_row (db, sql->str,\n                                        get_user_groups_cb, &ret,\n                                        1, \"int\", group_id) < 0) {\n        g_string_free (sql, TRUE);\n        return NULL;\n    }\n    g_string_free (sql, TRUE);\n\n    return ret;\n}\n\nGList *\nccnet_group_manager_get_descendants_groups(CcnetGroupManager *mgr, int group_id,\n                                           GError **error)\n{\n    GList *ret = NULL;\n    CcnetDB *db = mgr->priv->db;\n    const char *table_name = mgr->priv->table_name;\n\n    GString *sql = g_string_new(\"\");\n    if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL)\n        g_string_printf (sql, \"SELECT g.group_id, group_name, creator_name, timestamp, \"\n                              \"parent_group_id FROM \\\"%s\\\" g, GroupStructure s \"\n                              \"WHERE g.group_id=s.group_id \"\n                              \"AND (s.path LIKE '%d, %%' OR s.path LIKE '%%, %d, %%' \"\n                              \"OR g.group_id=?)\",\n                              table_name, group_id, group_id);\n\n    else\n        g_string_printf (sql, \"SELECT g.group_id, group_name, creator_name, timestamp, \"\n                              \"parent_group_id FROM `%s` g, GroupStructure s \"\n                              \"WHERE g.group_id=s.group_id \"\n                              \"AND (s.path LIKE '%d, %%' OR s.path LIKE '%%, %d, %%' \"\n                    
          \"OR g.group_id=?)\",\n                              table_name, group_id, group_id);\n\n    if (seaf_db_statement_foreach_row (db, sql->str,\n                                        get_user_groups_cb, &ret,\n                                        1, \"int\", group_id) < 0) {\n        g_string_free (sql, TRUE);\n        return NULL;\n    }\n    g_string_free (sql, TRUE);\n\n    return ret;\n}\n\nCcnetGroup *\nccnet_group_manager_get_group (CcnetGroupManager *mgr, int group_id,\n                               GError **error)\n{\n    CcnetDB *db = mgr->priv->db;\n    GString *sql = g_string_new (\"\");\n    CcnetGroup *ccnetgroup = NULL;\n    const char *table_name = mgr->priv->table_name;\n\n    if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL)\n        g_string_printf (sql,\n            \"SELECT group_id, group_name, creator_name, timestamp, parent_group_id FROM \"\n            \"\\\"%s\\\" WHERE group_id = ?\", table_name);\n    else\n        g_string_printf (sql,\n            \"SELECT group_id, group_name, creator_name, timestamp, parent_group_id FROM \"\n            \"`%s` WHERE group_id = ?\", table_name);\n    if (seaf_db_statement_foreach_row (db, sql->str,\n                                        get_ccnetgroup_cb, &ccnetgroup,\n                                        1, \"int\", group_id) < 0) {\n        g_string_free (sql, TRUE);\n        return NULL;\n    }\n    g_string_free (sql, TRUE);\n\n    return ccnetgroup;\n}\n\nstatic gboolean\nget_ccnet_groupuser_cb (CcnetDBRow *row, void *data)\n{\n    GList **plist = data;\n    CcnetGroupUser *group_user;\n    \n    int group_id = seaf_db_row_get_column_int (row, 0);\n    const char *user = (const char *)seaf_db_row_get_column_text (row, 1);\n    int is_staff = seaf_db_row_get_column_int (row, 2);\n\n    char *user_l = g_ascii_strdown (user, -1);\n    group_user = g_object_new (CCNET_TYPE_GROUP_USER,\n                               \"group_id\", group_id,\n                               \"user_name\", 
user_l,\n                               \"is_staff\", is_staff,\n                               NULL);\n    g_free (user_l);\n    if (group_user != NULL) {\n        *plist = g_list_prepend (*plist, group_user);\n    }\n    \n    return TRUE;\n}\n\nGList *\nccnet_group_manager_get_group_members (CcnetGroupManager *mgr,\n                                       int group_id,\n                                       int start,\n                                       int limit,\n                                       GError **error)\n{\n    CcnetDB *db = mgr->priv->db;\n    char *sql;\n    GList *group_users = NULL;\n    int rc;\n    \n    if (limit == -1) {\n        sql = \"SELECT group_id, user_name, is_staff FROM GroupUser WHERE group_id = ?\";\n        rc =seaf_db_statement_foreach_row (db, sql,\n                                           get_ccnet_groupuser_cb, &group_users,\n                                           1, \"int\", group_id);\n    } else {\n        sql = \"SELECT group_id, user_name, is_staff FROM GroupUser WHERE group_id = ? LIMIT ? 
OFFSET ?\";\n        rc = seaf_db_statement_foreach_row (db, sql,\n                                            get_ccnet_groupuser_cb, &group_users,\n                                            3, \"int\", group_id,\n                                            \"int\", limit,\n                                            \"int\", start);\n    }\n\n    if (rc < 0) {\n        return NULL;\n    }\n\n    return g_list_reverse (group_users);\n}\n\nGList *\nccnet_group_manager_get_members_with_prefix (CcnetGroupManager *mgr,\n                                             int group_id,\n                                             const char *prefix,\n                                             GError **error)\n{\n    CcnetDB *db = mgr->priv->db;\n    GList *group_users = NULL;\n    GList *ptr;\n    CcnetGroup *group;\n    GString *sql = g_string_new (\"\");\n    int id;\n\n    g_string_printf(sql, \"SELECT group_id, user_name, is_staff FROM GroupUser \"\n                         \"WHERE group_id IN (\");\n    GList *groups = ccnet_group_manager_get_descendants_groups(mgr, group_id, NULL);\n    if (!groups)\n        g_string_append_printf(sql, \"%d\", group_id);\n\n    for (ptr = groups; ptr; ptr = ptr->next) {\n        group = ptr->data;\n        g_object_get(group, \"id\", &id, NULL);\n        g_string_append_printf(sql, \"%d\", id);\n        if (ptr->next)\n            g_string_append_printf(sql, \", \");\n    }\n    g_string_append_printf(sql, \")\");\n    if (prefix)\n        g_string_append_printf(sql, \" AND user_name LIKE '%s%%'\", prefix);\n    g_list_free_full (groups, g_object_unref);\n\n    if (seaf_db_statement_foreach_row (db, sql->str,\n                                        get_ccnet_groupuser_cb, &group_users, 0) < 0) {\n        g_string_free(sql, TRUE);\n        return NULL;\n    }\n    g_string_free(sql, TRUE);\n\n    return group_users;\n}\n\nint\nccnet_group_manager_check_group_staff (CcnetGroupManager *mgr,\n                                       int 
group_id,\n                                       const char *user_name,\n                                       gboolean in_structure)\n{\n    return check_group_staff (mgr->priv->db, group_id, user_name, in_structure);\n}\n\nint\nccnet_group_manager_remove_group_user (CcnetGroupManager *mgr,\n                                       const char *user)\n{\n    CcnetDB *db = mgr->priv->db;\n\n    seaf_db_statement_query (db,\n                              \"DELETE FROM GroupUser \"\n                              \"WHERE user_name = ?\",\n                              1, \"string\", user);\n\n    return 0;\n}\n\nint\nccnet_group_manager_is_group_user (CcnetGroupManager *mgr,\n                                   int group_id,\n                                   const char *user,\n                                   gboolean in_structure)\n{\n    CcnetDB *db = mgr->priv->db;\n\n    gboolean exists, err;\n    exists = seaf_db_statement_exists (db, \"SELECT group_id FROM GroupUser \"\n                                        \"WHERE group_id=? AND user_name=?\", &err,\n                                        2, \"int\", group_id, \"string\", user);\n    if (err) {\n        ccnet_warning (\"DB error when check user exist in GroupUser.\\n\");\n        return 0;\n    }\n    if (!in_structure || exists)\n        return exists ? 1 : 0;\n\n    GList *ptr;\n    GList *groups = ccnet_group_manager_get_groups_by_user (mgr, user, TRUE, NULL);\n    if (!groups)\n        return 0;\n\n    CcnetGroup *group;\n    int id;\n    for (ptr = groups; ptr; ptr = ptr->next) {\n        group = ptr->data;\n        g_object_get (group, \"id\", &id, NULL);\n        if (group_id == id) {\n            exists = TRUE;\n            break;\n        }\n    }\n    g_list_free_full (groups, g_object_unref);\n\n    return exists ? 
1 : 0;\n}\n\nstatic gboolean\nget_all_ccnetgroups_cb (CcnetDBRow *row, void *data)\n{\n    GList **plist = data;\n    int group_id;\n    const char *group_name;\n    const char *creator;\n    gint64 ts;\n    int parent_group_id;\n\n    group_id = seaf_db_row_get_column_int (row, 0);\n    group_name = (const char *)seaf_db_row_get_column_text (row, 1);\n    creator = (const char *)seaf_db_row_get_column_text (row, 2);\n    ts = seaf_db_row_get_column_int64 (row, 3);\n    parent_group_id = seaf_db_row_get_column_int (row, 4);\n\n    char *creator_l = g_ascii_strdown (creator, -1);\n    CcnetGroup *group = g_object_new (CCNET_TYPE_GROUP,\n                                      \"id\", group_id,\n                                      \"group_name\", group_name,\n                                      \"creator_name\", creator_l,\n                                      \"timestamp\", ts,\n                                      \"source\", \"DB\",\n                                      \"parent_group_id\", parent_group_id,\n                                      NULL);\n    g_free (creator_l);\n\n    *plist = g_list_prepend (*plist, group);\n    \n    return TRUE;\n}\n\nGList *\nccnet_group_manager_get_top_groups (CcnetGroupManager *mgr,\n                                    gboolean including_org,\n                                    GError **error)\n{\n    CcnetDB *db = mgr->priv->db;\n    GList *ret = NULL;\n    GString *sql = g_string_new (\"\");\n    const char *table_name = mgr->priv->table_name;\n    int rc;\n\n    if (seaf_db_type(mgr->priv->db) == SEAF_DB_TYPE_PGSQL) {\n        if (including_org)\n            g_string_printf (sql, \"SELECT group_id, group_name, \"\n                                  \"creator_name, timestamp, parent_group_id FROM \\\"%s\\\" \"\n                                  \"WHERE parent_group_id=-1 ORDER BY timestamp DESC\", table_name);\n        else\n            g_string_printf (sql, \"SELECT g.group_id, g.group_name, \"\n                       
           \"g.creator_name, g.timestamp, g.parent_group_id FROM \\\"%s\\\" g \"\n                                  \"LEFT JOIN OrgGroup o ON g.group_id = o.group_id \"\n                                  \"WHERE g.parent_group_id=-1 AND o.group_id is NULL \"\n                                  \"ORDER BY timestamp DESC\", table_name);\n    } else {\n        if (including_org)\n            g_string_printf (sql, \"SELECT group_id, group_name, \"\n                                  \"creator_name, timestamp, parent_group_id FROM `%s` \"\n                                  \"WHERE parent_group_id=-1 ORDER BY timestamp DESC\", table_name);\n        else\n            g_string_printf (sql, \"SELECT g.group_id, g.group_name, \"\n                                  \"g.creator_name, g.timestamp, g.parent_group_id FROM `%s` g \"\n                                  \"LEFT JOIN OrgGroup o ON g.group_id = o.group_id \"\n                                  \"WHERE g.parent_group_id=-1 AND o.group_id is NULL \"\n                                  \"ORDER BY timestamp DESC\", table_name);\n    }\n    rc = seaf_db_statement_foreach_row (db, sql->str,\n                                         get_all_ccnetgroups_cb, &ret, 0);\n    g_string_free (sql, TRUE);\n    if (rc < 0)\n        return NULL;\n\n    return g_list_reverse (ret);\n}\n\nGList*\nccnet_group_manager_list_all_departments (CcnetGroupManager *mgr,\n                                          GError **error)\n{\n    CcnetDB *db = mgr->priv->db;\n    GList *ret = NULL;\n    GString *sql = g_string_new (\"\");\n    const char *table_name = mgr->priv->table_name;\n    int rc;\n    int db_type = seaf_db_type(db);\n\n    if (db_type == SEAF_DB_TYPE_PGSQL) {\n        g_string_printf (sql, \"SELECT group_id, group_name, \"\n                              \"creator_name, timestamp, type, \"\n                              \"parent_group_id FROM \\\"%s\\\" \"\n                              \"WHERE parent_group_id = -1 OR parent_group_id > 0 
\"\n                              \"ORDER BY group_id\", table_name);\n        rc = seaf_db_statement_foreach_row (db, sql->str,\n                                             get_all_ccnetgroups_cb, &ret, 0);\n    } else {\n        g_string_printf (sql, \"SELECT `group_id`, `group_name`, \"\n                              \"`creator_name`, `timestamp`, `type`, `parent_group_id` FROM `%s` \"\n                              \"WHERE parent_group_id = -1 OR parent_group_id > 0 \"\n                              \"ORDER BY group_id\", table_name);\n        rc = seaf_db_statement_foreach_row (db, sql->str,\n                                             get_all_ccnetgroups_cb, &ret, 0);\n    }\n    g_string_free (sql, TRUE);\n\n    if (rc < 0)\n        return NULL;\n\n    return g_list_reverse (ret);\n}\n\nGList*\nccnet_group_manager_get_all_groups (CcnetGroupManager *mgr,\n                                    int start, int limit, GError **error)\n{\n    CcnetDB *db = mgr->priv->db;\n    GList *ret = NULL;\n    GString *sql = g_string_new (\"\");\n    const char *table_name = mgr->priv->table_name;\n    int rc;\n\n    if (seaf_db_type(mgr->priv->db) == SEAF_DB_TYPE_PGSQL) {\n        if (start == -1 && limit == -1) {\n            g_string_printf (sql, \"SELECT group_id, group_name, \"\n                                  \"creator_name, timestamp, parent_group_id FROM \\\"%s\\\" \"\n                                  \"ORDER BY timestamp DESC\", table_name);\n            rc = seaf_db_statement_foreach_row (db, sql->str,\n                                                 get_all_ccnetgroups_cb, &ret, 0);\n        } else {\n            g_string_printf (sql, \"SELECT group_id, group_name, \"\n                                  \"creator_name, timestamp, parent_group_id FROM \\\"%s\\\" \"\n                                  \"ORDER BY timestamp DESC LIMIT ? 
OFFSET ?\",\n                                  table_name);\n            rc = seaf_db_statement_foreach_row (db, sql->str,\n                                                 get_all_ccnetgroups_cb, &ret,\n                                                 2, \"int\", limit, \"int\", start);\n        }\n    } else {\n        if (start == -1 && limit == -1) {\n            g_string_printf (sql, \"SELECT `group_id`, `group_name`, \"\n                                  \"`creator_name`, `timestamp`, `parent_group_id` FROM `%s` \"\n                                  \"ORDER BY timestamp DESC\", table_name);\n            rc = seaf_db_statement_foreach_row (db, sql->str,\n                                                 get_all_ccnetgroups_cb, &ret, 0);\n        } else {\n            g_string_printf (sql, \"SELECT `group_id`, `group_name`, \"\n                                  \"`creator_name`, `timestamp`, `parent_group_id` FROM `%s` \"\n                                  \"ORDER BY timestamp DESC LIMIT ? OFFSET ?\",\n                                  table_name);\n            rc = seaf_db_statement_foreach_row (db, sql->str,\n                                                 get_all_ccnetgroups_cb, &ret,\n                                                 2, \"int\", limit, \"int\", start);\n        }\n    }\n    g_string_free (sql, TRUE);\n\n    if (rc < 0)\n        return NULL;\n\n    return g_list_reverse (ret);\n}\n\nint\nccnet_group_manager_set_group_creator (CcnetGroupManager *mgr,\n                                       int group_id,\n                                       const char *user_name)\n{\n    CcnetDB *db = mgr->priv->db;\n    const char *table_name = mgr->priv->table_name;\n    GString *sql = g_string_new (\"\");\n\n    if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL) {\n        g_string_printf (sql, \"UPDATE \\\"%s\\\" SET creator_name = ? 
WHERE group_id = ?\",\n                         table_name);\n    } else {\n        g_string_printf (sql, \"UPDATE `%s` SET creator_name = ? WHERE group_id = ?\",\n                         table_name);\n    }\n\n    seaf_db_statement_query (db, sql->str, 2, \"string\", user_name, \"int\", group_id);\n    g_string_free (sql, TRUE);\n\n    return 0;\n    \n}\n\nGList *\nccnet_group_manager_search_groups (CcnetGroupManager *mgr,\n                                   const char *keyword,\n                                   int start, int limit)\n{\n    CcnetDB *db = mgr->priv->db;\n    GList *ret = NULL;\n    GString *sql = g_string_new (\"\");\n    const char *table_name = mgr->priv->table_name;\n\n    int rc;\n    char *db_patt = g_strdup_printf (\"%%%s%%\", keyword);\n\n    if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL) {\n        if (start == -1 && limit == -1) {\n            g_string_printf (sql,\n                             \"SELECT group_id, group_name, \"\n                             \"creator_name, timestamp, parent_group_id \"\n                             \"FROM \\\"%s\\\" WHERE group_name LIKE ?\", table_name);\n            rc = seaf_db_statement_foreach_row (db, sql->str,\n                                                 get_all_ccnetgroups_cb, &ret,\n                                                 1, \"string\", db_patt);\n        } else {\n            g_string_printf (sql,\n                             \"SELECT group_id, group_name, \"\n                             \"creator_name, timestamp, parent_group_id \"\n                             \"FROM \\\"%s\\\" WHERE group_name LIKE ? \"\n                             \"LIMIT ? 
OFFSET ?\", table_name);\n            rc = seaf_db_statement_foreach_row (db, sql->str,\n                                                 get_all_ccnetgroups_cb, &ret,\n                                                 3, \"string\", db_patt,\n                                                 \"int\", limit, \"int\", start);\n        }\n    } else {\n        if (start == -1 && limit == -1) {\n            g_string_printf (sql,\n                             \"SELECT group_id, group_name, \"\n                             \"creator_name, timestamp, parent_group_id \"\n                             \"FROM `%s` WHERE group_name LIKE ?\", table_name);\n            rc = seaf_db_statement_foreach_row (db, sql->str,\n                                                 get_all_ccnetgroups_cb, &ret,\n                                                 1, \"string\", db_patt);\n        } else {\n            g_string_printf (sql,\n                             \"SELECT group_id, group_name, \"\n                             \"creator_name, timestamp, parent_group_id \"\n                             \"FROM `%s` WHERE group_name LIKE ? \"\n                             \"LIMIT ? 
OFFSET ?\", table_name);\n            rc = seaf_db_statement_foreach_row (db, sql->str,\n                                                 get_all_ccnetgroups_cb, &ret,\n                                                 3, \"string\", db_patt,\n                                                 \"int\", limit, \"int\", start);\n        }\n    }\n    g_free (db_patt);\n    g_string_free (sql, TRUE);\n\n    if (rc < 0) {\n        while (ret != NULL) {\n            g_object_unref (ret->data);\n            ret = g_list_delete_link (ret, ret);\n        }\n        return NULL;\n    }\n\n    return g_list_reverse (ret);\n}\n\nstatic gboolean\nget_groups_members_cb (CcnetDBRow *row, void *data)\n{\n    GList **users = data;\n    const char *user = seaf_db_row_get_column_text (row, 0);\n\n    char *user_l = g_ascii_strdown (user, -1);\n    CcnetGroupUser *group_user = g_object_new (CCNET_TYPE_GROUP_USER,\n                                               \"user_name\", user_l,\n                                               NULL);\n    g_free (user_l);\n    *users = g_list_append(*users, group_user);\n\n    return TRUE;\n}\n\n/* group_ids is json format: \"[id1, id2, id3, ...]\" */\nGList *\nccnet_group_manager_get_groups_members (CcnetGroupManager *mgr, const char *group_ids,\n                                        GError **error)\n{\n    CcnetDB *db = mgr->priv->db;\n    GList *ret = NULL;\n    GString *sql = g_string_new (\"\");\n    int i, group_id;\n    json_t *j_array = NULL, *j_obj;\n    json_error_t j_error;\n\n    g_string_printf (sql, \"SELECT DISTINCT user_name FROM GroupUser WHERE group_id IN (\");\n    j_array = json_loadb (group_ids, strlen(group_ids), 0, &j_error);\n    if (!j_array) {\n        g_set_error (error, CCNET_DOMAIN, 0, \"Bad args.\");\n        g_string_free (sql, TRUE);\n        return NULL;\n    }\n    size_t id_num = json_array_size (j_array);\n\n    for (i = 0; i < id_num; i++) {\n        j_obj = json_array_get (j_array, i);\n        group_id = 
json_integer_value (j_obj);\n        if (group_id <= 0) {\n            g_set_error (error, CCNET_DOMAIN, 0, \"Bad args.\");\n            g_string_free (sql, TRUE);\n            json_decref (j_array);\n            return NULL;\n        }\n        g_string_append_printf (sql, \"%d\", group_id);\n        if (i + 1 < id_num)\n            g_string_append_printf (sql, \",\");\n    }\n    g_string_append_printf (sql, \")\");\n    json_decref (j_array);\n\n    if (seaf_db_statement_foreach_row (db, sql->str, get_groups_members_cb, &ret, 0) < 0)\n        ccnet_warning(\"Failed to get groups members for group [%s].\\n\", group_ids);\n\n    g_string_free (sql, TRUE);\n\n    return ret;\n}\n\nGList*\nccnet_group_manager_search_group_members (CcnetGroupManager *mgr,\n                                          int group_id,\n                                          const char *pattern)\n{\n    CcnetDB *db = mgr->priv->db;\n    GList *ret = NULL;\n    char *sql;\n    int rc;\n\n    char *db_patt = g_strdup_printf (\"%%%s%%\", pattern);\n\n    sql = \"SELECT DISTINCT user_name FROM GroupUser \"\n          \"WHERE group_id = ? AND user_name LIKE ? ORDER BY user_name\";\n    rc = seaf_db_statement_foreach_row (db, sql,\n                                        get_groups_members_cb, &ret,\n                                        2, \"int\", group_id, \"string\", db_patt);\n\n    g_free (db_patt);\n    if (rc < 0) {\n        g_list_free_full (ret, g_object_unref);\n        return NULL;\n    }\n\n    return g_list_reverse (ret);\n}\n\nint\nccnet_group_manager_update_group_user (CcnetGroupManager *mgr,\n                                       const char *old_email,\n                                       const char *new_email)\n{\n    int rc;\n    CcnetDB *db = mgr->priv->db;\n\n    rc = seaf_db_statement_query (db,\n                                  \"UPDATE GroupUser SET user_name=? 
\"\n                                  \"WHERE user_name = ?\",\n                                  2, \"string\", new_email, \"string\", old_email);\n    if (rc < 0){\n        return -1;\n    }\n\n    return 0;\n}\n"
  },
  {
    "path": "common/group-mgr.h",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#ifndef GROUP_MGR_H\n#define GROUP_MGR_H\n\n/* #define MAX_GROUP_MEMBERS\t16 */\n\ntypedef struct _SeafileSession SeafileSession;\ntypedef struct _CcnetGroupManager CcnetGroupManager;\ntypedef struct _CcnetGroupManagerPriv CcnetGroupManagerPriv;\n\nstruct _CcnetGroupManager\n{\n    SeafileSession *session;\n    \n    CcnetGroupManagerPriv\t*priv;\n};\n\nCcnetGroupManager* ccnet_group_manager_new (SeafileSession *session);\n\nint\nccnet_group_manager_prepare (CcnetGroupManager *manager);\n\nvoid ccnet_group_manager_start (CcnetGroupManager *manager);\n\nint ccnet_group_manager_create_group (CcnetGroupManager *mgr,\n                                      const char *group_name,\n                                      const char *user_name,\n                                      int parent_group_id,\n                                      GError **error);\n\nint ccnet_group_manager_create_org_group (CcnetGroupManager *mgr,\n                                          int org_id,\n                                          const char *group_name,\n                                          const char *user_name,\n                                          int parent_group_id,\n                                          GError **error);\n\nint ccnet_group_manager_remove_group (CcnetGroupManager *mgr,\n                                      int group_id,\n                                      gboolean remove_anyway,\n                                      GError **error);\n\nint ccnet_group_manager_add_member (CcnetGroupManager *mgr,\n                                    int group_id,\n                                    const char *user_name,\n                                    const char *member_name,\n                                    GError **error);\n\nint ccnet_group_manager_remove_member (CcnetGroupManager *mgr,\n                                       int group_id,\n            
                           const char *user_name,\n                                       const char *member_name,\n                                       GError **error);\n\nint ccnet_group_manager_set_admin (CcnetGroupManager *mgr,\n                                   int group_id,\n                                   const char *member_name,\n                                   GError **error);\n\nint ccnet_group_manager_unset_admin (CcnetGroupManager *mgr,\n                                     int group_id,\n                                     const char *member_name,\n                                     GError **error);\n\nint ccnet_group_manager_set_group_name (CcnetGroupManager *mgr,\n                                        int group_id,\n                                        const char *group_name,\n                                        GError **error);\n\nint ccnet_group_manager_quit_group (CcnetGroupManager *mgr,\n                                    int group_id,\n                                    const char *user_name,\n                                    GError **error);\n\nGList *\nccnet_group_manager_get_groups_by_user (CcnetGroupManager *mgr,\n                                        const char *user_name,\n                                        gboolean return_ancestors,\n                                        GError **error);\n\nCcnetGroup *\nccnet_group_manager_get_group (CcnetGroupManager *mgr, int group_id,\n                               GError **error);\n\nGList *\nccnet_group_manager_get_group_members (CcnetGroupManager *mgr,\n                                       int group_id,\n                                       int start,\n                                       int limit,\n                                       GError **error);\n\nGList *\nccnet_group_manager_get_members_with_prefix (CcnetGroupManager *mgr,\n                                             int group_id,\n                                             const char 
*prefix,\n                                             GError **error);\n\nint\nccnet_group_manager_check_group_staff (CcnetGroupManager *mgr,\n                                       int group_id,\n                                       const char *user_name,\n                                       int in_structure);\n\nint\nccnet_group_manager_remove_group_user (CcnetGroupManager *mgr,\n                                       const char *user);\n\nint\nccnet_group_manager_is_group_user (CcnetGroupManager *mgr,\n                                   int group_id,\n                                   const char *user,\n                                   gboolean in_structure);\n\nGList*\nccnet_group_manager_list_all_departments (CcnetGroupManager *mgr,\n                                          GError **error);\n\nGList*\nccnet_group_manager_get_all_groups (CcnetGroupManager *mgr,\n                                    int start, int limit, GError **error);\n\nint\nccnet_group_manager_set_group_creator (CcnetGroupManager *mgr,\n                                       int group_id,\n                                       const char *user_name);\n\nGList*\nccnet_group_manager_search_groups (CcnetGroupManager *mgr,\n                                   const char *keyword,\n                                   int start, int limit);\n\nGList*\nccnet_group_manager_search_group_members (CcnetGroupManager *mgr,\n                                          int group_id,\n                                          const char *pattern);\n\nGList *\nccnet_group_manager_get_top_groups (CcnetGroupManager *mgr, gboolean including_org, GError **error);\n\nGList *\nccnet_group_manager_get_child_groups (CcnetGroupManager *mgr, int group_id,\n                                      GError **error);\n\nGList *\nccnet_group_manager_get_descendants_groups (CcnetGroupManager *mgr, int group_id,\n                                            GError **error);\n\nGList 
*\nccnet_group_manager_get_ancestor_groups (CcnetGroupManager *mgr, int group_id);\n\nGList *\nccnet_group_manager_get_groups_members (CcnetGroupManager *mgr, const char *group_ids,\n                                        GError **error);\n\nint\nccnet_group_manager_update_group_user (CcnetGroupManager *mgr,\n                                       const char *old_email,\n                                       const char *new_email);\n#endif /* GROUP_MGR_H */\n\n"
  },
  {
    "path": "common/log.c",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#include \"common.h\"\n\n#include <stdio.h>\n#include <glib/gstdio.h>\n\n#ifndef WIN32\n#ifdef SEAFILE_SERVER\n#include <sys/syslog.h>\n#endif\n#endif\n\n#include \"log.h\"\n#include \"utils.h\"\n\n/* message with greater log levels will be ignored */\nstatic int ccnet_log_level;\nstatic int seafile_log_level;\nstatic char *logfile;\nstatic FILE *logfp;\nstatic gboolean log_to_stdout = FALSE;\nstatic char *app_name;\n\n#ifndef WIN32\n#ifdef SEAFILE_SERVER\nstatic gboolean enable_syslog;\n#endif\n#endif\n\n#ifndef WIN32\n#ifdef SEAFILE_SERVER\nstatic int\nget_syslog_level (GLogLevelFlags level)\n{\n    switch (level) {\n        case G_LOG_LEVEL_DEBUG:\n            return LOG_DEBUG;\n        case G_LOG_LEVEL_INFO:\n            return LOG_INFO;\n        case G_LOG_LEVEL_WARNING:\n            return LOG_WARNING;\n        case G_LOG_LEVEL_ERROR:\n            return LOG_ERR;\n        case G_LOG_LEVEL_CRITICAL:\n            return LOG_ERR;\n        default:\n            return LOG_DEBUG;\n    }\n}\n#endif\n#endif\n\nstatic void \nseafile_log (const gchar *log_domain, GLogLevelFlags log_level,\n             const gchar *message,    gpointer user_data)\n{\n    time_t t;\n    struct tm *tm;\n    char buf[1024];\n    int len;\n\n    if (log_level > seafile_log_level)\n        return;\n\n    if (log_to_stdout) {\n        char name_buf[32] = {0};\n        snprintf(name_buf, sizeof(name_buf), \"[%s] \", app_name);\n        fputs (name_buf, logfp);\n    }\n\n    t = time(NULL);\n    tm = localtime(&t);\n    len = strftime (buf, 1024, \"[%Y-%m-%d %H:%M:%S] \", tm);\n    g_return_if_fail (len < 1024);\n    if (logfp) {    \n        fputs (buf, logfp);\n        if (log_level == G_LOG_LEVEL_DEBUG)\n            fputs (\"[DEBUG] \", logfp);\n        else if (log_level == G_LOG_LEVEL_WARNING)\n            fputs (\"[WARNING] \", logfp);\n        else if (log_level == G_LOG_LEVEL_CRITICAL)\n   
         fputs (\"[ERROR] \", logfp);\n        else\n            fputs (\"[INFO] \", logfp);\n        fputs (message, logfp);\n        fflush (logfp);\n    }\n\n#ifndef WIN32\n#ifdef SEAFILE_SERVER\n    if (enable_syslog)\n        syslog (get_syslog_level (log_level), \"%s\", message);\n#endif\n#endif\n}\n\nstatic void \nccnet_log (const gchar *log_domain, GLogLevelFlags log_level,\n             const gchar *message,    gpointer user_data)\n{\n    time_t t;\n    struct tm *tm;\n    char buf[1024];\n    int len;\n\n    if (log_level > ccnet_log_level)\n        return;\n\n    t = time(NULL);\n    tm = localtime(&t);\n    len = strftime (buf, 1024, \"[%x %X] \", tm);\n    g_return_if_fail (len < 1024);\n    if (logfp) {\n        fputs (buf, logfp);\n        if (log_level == G_LOG_LEVEL_DEBUG)\n            fputs (\"[DEBUG] \", logfp);\n        else if (log_level == G_LOG_LEVEL_WARNING)\n            fputs (\"[WARNING] \", logfp);\n        else if (log_level == G_LOG_LEVEL_CRITICAL)\n            fputs (\"[ERROR] \", logfp);\n        else\n            fputs (\"[INFO] \", logfp);\n        fputs (message, logfp);\n        fflush (logfp);\n    }\n\n#ifndef WIN32\n#ifdef SEAFILE_SERVER\n    if (enable_syslog)\n        syslog (get_syslog_level (log_level), \"%s\", message);\n#endif\n#endif\n}\n\nstatic int\nget_debug_level(const char *str, int default_level)\n{\n    if (strcmp(str, \"debug\") == 0)\n        return G_LOG_LEVEL_DEBUG;\n    if (strcmp(str, \"info\") == 0)\n        return G_LOG_LEVEL_INFO;\n    if (strcmp(str, \"warning\") == 0)\n        return G_LOG_LEVEL_WARNING;\n    return default_level;\n}\n\nint\nseafile_log_init (const char *_logfile, const char *ccnet_debug_level_str,\n                  const char *seafile_debug_level_str, const char *_app_name)\n{\n    g_log_set_handler (NULL, G_LOG_LEVEL_MASK | G_LOG_FLAG_FATAL\n                       | G_LOG_FLAG_RECURSION, seafile_log, NULL);\n    g_log_set_handler (\"Ccnet\", G_LOG_LEVEL_MASK | G_LOG_FLAG_FATAL\n      
                 | G_LOG_FLAG_RECURSION, ccnet_log, NULL);\n\n    /* record all log message */\n    ccnet_log_level = get_debug_level(ccnet_debug_level_str, G_LOG_LEVEL_INFO);\n    seafile_log_level = get_debug_level(seafile_debug_level_str, G_LOG_LEVEL_DEBUG);\n\n    app_name = g_strdup (_app_name);\n\n    const char *log_to_stdout_env = g_getenv(\"SEAFILE_LOG_TO_STDOUT\");\n    if (g_strcmp0(log_to_stdout_env, \"true\") == 0) {\n        logfp = stdout;\n        logfile = g_strdup (_logfile);\n        log_to_stdout = TRUE;\n    } else if (g_strcmp0(_logfile, \"-\") == 0) {\n        logfp = stdout;\n        logfile = g_strdup (_logfile);\n    } else {\n        logfile = ccnet_expand_path(_logfile);\n        if ((logfp = g_fopen (logfile, \"a+\")) == NULL) {\n            seaf_message (\"Failed to open file %s\\n\", logfile);\n            return -1;\n        }\n    }\n\n    return 0;\n}\n\nint\nseafile_log_reopen ()\n{\n    FILE *fp, *oldfp;\n\n    if (g_strcmp0(logfile, \"-\") == 0 || log_to_stdout)\n        return 0;\n\n    if ((fp = g_fopen (logfile, \"a+\")) == NULL) {\n        seaf_message (\"Failed to open file %s\\n\", logfile);\n        return -1;\n    }\n\n    //TODO: check file's health\n\n    oldfp = logfp;\n    logfp = fp;\n    if (fclose(oldfp) < 0) {\n        seaf_message (\"Failed to close file %s\\n\", logfile);\n        return -1;\n    }\n\n    return 0;\n}\n\nstatic SeafileDebugFlags debug_flags = 0;\n\nstatic GDebugKey debug_keys[] = {\n  { \"Transfer\", SEAFILE_DEBUG_TRANSFER },\n  { \"Sync\", SEAFILE_DEBUG_SYNC },\n  { \"Watch\", SEAFILE_DEBUG_WATCH },\n  { \"Http\", SEAFILE_DEBUG_HTTP },\n  { \"Merge\", SEAFILE_DEBUG_MERGE },\n  { \"Other\", SEAFILE_DEBUG_OTHER },\n};\n\ngboolean\nseafile_debug_flag_is_set (SeafileDebugFlags flag)\n{\n    return (debug_flags & flag) != 0;\n}\n\nvoid\nseafile_debug_set_flags (SeafileDebugFlags flags)\n{\n    g_message (\"Set debug flags %#x\\n\", flags);\n    debug_flags |= 
flags;\n}\n\nvoid\nseafile_debug_set_flags_string (const gchar *flags_string)\n{\n    guint nkeys = G_N_ELEMENTS (debug_keys);\n\n    if (flags_string)\n        seafile_debug_set_flags (\n            g_parse_debug_string (flags_string, debug_keys, nkeys));\n}\n\nvoid\nseafile_debug_impl (SeafileDebugFlags flag, const gchar *format, ...)\n{\n    if (flag & debug_flags) {\n        va_list args;\n        va_start (args, format);\n        g_logv (G_LOG_DOMAIN, G_LOG_LEVEL_DEBUG, format, args);\n        va_end (args);\n    }\n}\n\n#ifndef WIN32\n#ifdef SEAFILE_SERVER\nvoid\nset_syslog_config (GKeyFile *config)\n{\n    enable_syslog = g_key_file_get_boolean (config,\n                                            \"general\", \"enable_syslog\",\n                                            NULL);\n    if (enable_syslog)\n        openlog (NULL, LOG_NDELAY | LOG_PID, LOG_USER);\n}\n#endif\n#endif\n"
  },
  {
    "path": "common/log.h",
    "content": "#ifndef LOG_H\n#define LOG_H\n\n#define SEAFILE_DOMAIN g_quark_from_string(\"seafile\")\n\n#ifndef seaf_warning\n#define seaf_warning(fmt, ...) g_warning(\"%s(%d): \" fmt, __FILE__, __LINE__, ##__VA_ARGS__)\n#endif\n\n#ifndef seaf_message\n#define seaf_message(fmt, ...) g_message(\"%s(%d): \" fmt, __FILE__, __LINE__, ##__VA_ARGS__)\n#endif\n\n#ifndef seaf_error\n#define seaf_error(fmt, ...) g_critical(\"%s(%d): \" fmt, __FILE__, __LINE__, ##__VA_ARGS__)\n#endif\n\n\nint seafile_log_init (const char *logfile, const char *ccnet_debug_level_str,\n                      const char *seafile_debug_level_str, const char *_app_name);\nint seafile_log_reopen ();\n\n#ifndef WIN32\n#ifdef SEAFILE_SERVER\nvoid\nset_syslog_config (GKeyFile *config);\n#endif\n#endif\n\nvoid\nseafile_debug_set_flags_string (const gchar *flags_string);\n\ntypedef enum\n{\n    SEAFILE_DEBUG_TRANSFER = 1 << 1,\n    SEAFILE_DEBUG_SYNC = 1 << 2,\n    SEAFILE_DEBUG_WATCH = 1 << 3, /* wt-monitor */\n    SEAFILE_DEBUG_HTTP = 1 << 4,  /* http server */\n    SEAFILE_DEBUG_MERGE = 1 << 5,\n    SEAFILE_DEBUG_OTHER = 1 << 6,\n} SeafileDebugFlags;\n\nvoid seafile_debug_impl (SeafileDebugFlags flag, const gchar *format, ...);\n\n#ifdef DEBUG_FLAG\n\n#undef seaf_debug\n#define seaf_debug(fmt, ...)  \\\n    seafile_debug_impl (DEBUG_FLAG, \"%.10s(%d): \" fmt, __FILE__, __LINE__, ##__VA_ARGS__)\n\n#endif  /* DEBUG_FLAG */\n\n#endif\n"
  },
  {
    "path": "common/merge-new.c",
    "content": "#include \"common.h\"\n\n#include \"seafile-session.h\"\n#include \"merge-new.h\"\n#include \"vc-common.h\"\n\n#define DEBUG_FLAG SEAFILE_DEBUG_MERGE\n#include \"log.h\"\n\nstatic int\nmerge_trees_recursive (const char *store_id, int version,\n                       int n, SeafDir *trees[],\n                       const char *basedir,\n                       MergeOptions *opt);\n\nstatic const char *\nget_nickname_by_modifier (GHashTable *email_to_nickname, const char *modifier)\n{\n    const char *nickname = NULL;\n\n    if (!modifier) {\n        return NULL;\n    }\n\n    nickname = g_hash_table_lookup (email_to_nickname, modifier);\n    if (nickname) {\n        return nickname;\n    }\n\n    nickname = http_tx_manager_get_nickname (modifier);\n\n    if (!nickname) {\n        nickname = g_strdup (modifier);\n    }\n    g_hash_table_insert (email_to_nickname, g_strdup(modifier), nickname);\n\n    return nickname;\n}\n\nstatic char *\nmerge_conflict_filename (const char *store_id, int version,\n                         MergeOptions *opt,\n                         const char *basedir,\n                         const char *filename)\n{\n    char *path = NULL, *modifier = NULL, *conflict_name = NULL;\n    const char *nickname = NULL;\n    gint64 mtime;\n    SeafCommit *commit;\n\n    path = g_strconcat (basedir, filename, NULL);\n\n    int rc = get_file_modifier_mtime (opt->remote_repo_id,\n                                      store_id,\n                                      version,\n                                      opt->remote_head,\n                                      path,\n                                      &modifier, &mtime);\n    if (rc < 0) {\n        commit = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                                                 opt->remote_repo_id,\n                                                 version,\n                                                 opt->remote_head);\n        if (!commit) {\n     
       seaf_warning (\"Failed to find remote head %s:%s.\\n\",\n                          opt->remote_repo_id, opt->remote_head);\n            goto out;\n        }\n        modifier = g_strdup(commit->creator_name);\n        mtime = (gint64)time(NULL);\n        seaf_commit_unref (commit);\n    }\n\n    nickname = modifier;\n    if (seaf->seahub_pk)\n        nickname = get_nickname_by_modifier (opt->email_to_nickname, modifier);\n\n    conflict_name = gen_conflict_path (filename, nickname, mtime);\n\nout:\n    g_free (path);\n    g_free (modifier);\n    return conflict_name;\n}\n\nstatic char *\nmerge_conflict_dirname (const char *store_id, int version,\n                        MergeOptions *opt,\n                        const char *basedir,\n                        const char *dirname)\n{\n    char *modifier = NULL, *conflict_name = NULL;\n    const char *nickname = NULL;\n    SeafCommit *commit;\n\n    commit = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                                             opt->remote_repo_id, version,\n                                             opt->remote_head);\n    if (!commit) {\n        seaf_warning (\"Failed to find remote head %s:%s.\\n\",\n                      opt->remote_repo_id, opt->remote_head);\n        goto out;\n    }\n    modifier = g_strdup(commit->creator_name);\n    seaf_commit_unref (commit);\n\n    nickname = modifier;\n    if (seaf->seahub_pk)\n        nickname = get_nickname_by_modifier (opt->email_to_nickname, modifier);\n\n    conflict_name = gen_conflict_path (dirname, nickname, (gint64)time(NULL));\n\nout:\n    g_free (modifier);\n    return conflict_name;\n}\n\nint twoway_merge(const char *store_id, int version, const char *basedir,\n                 SeafDirent *dents[], GList **dents_out, struct MergeOptions *opt)\n{\n    SeafDirent *files[2];\n    int i;\n    int n = opt->n_ways;\n\n    memset (files, 0, sizeof(files[0])*n);\n    for (i = 0; i < n; ++i) {\n        if (dents[i] && 
S_ISREG(dents[i]->mode))\n            files[i] = dents[i];\n    }\n\n    SeafDirent *head, *remote;\n    char *conflict_name;\n\n    head = files[0];\n    remote = files[1];\n\n    if (head && remote) {\n        if (strcmp (head->id, remote->id) == 0) {\n            // file match\n            seaf_debug (\"%s%s: files match\\n\", basedir, head->name);\n            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(head));\n        } else {\n            // file content conflict\n            seaf_debug (\"%s%s: files conflict\\n\", basedir, head->name);\n            conflict_name = merge_conflict_filename(store_id, version,\n                                                    opt,\n                                                    basedir,\n                                                    head->name);\n            if (!conflict_name)\n                return -1;\n\n            g_free (remote->name);\n            remote->name = conflict_name;\n            remote->name_len = strlen (remote->name);\n\n            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(head));\n            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(remote));\n\n            opt->conflict = TRUE;\n        }\n    } else if (!head && remote) {\n        // file not in head, but in remote\n        seaf_debug (\"%s%s: added in remote\\n\", basedir, remote->name);\n        *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(remote));\n    } else if (head && !remote) {\n        // file in head, but not in remote\n        seaf_debug (\"%s%s: added in head\\n\", basedir, head->name);\n        *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(head));\n    }\n\n    return 0;\n}\n\nstatic int\nthreeway_merge (const char *store_id, int version,\n                 SeafDirent *dents[],\n                 const char *basedir,\n                 GList **dents_out,\n                 MergeOptions *opt)\n{\n    SeafDirent *files[3];\n    int i;\n    gint64 curr_time;\n    int 
n = opt->n_ways;\n\n    memset (files, 0, sizeof(files[0])*n);\n    for (i = 0; i < n; ++i) {\n        if (dents[i] && S_ISREG(dents[i]->mode))\n            files[i] = dents[i];\n    }\n\n    SeafDirent *base, *head, *remote;\n    char *conflict_name;\n\n    base = files[0];\n    head = files[1];\n    remote = files[2];\n\n    if (head && remote) {\n        if (strcmp (head->id, remote->id) == 0) {\n            seaf_debug (\"%s%s: files match\\n\", basedir, head->name);\n\n            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(head));\n        } else if (base && strcmp (base->id, head->id) == 0) {\n            seaf_debug (\"%s%s: unchanged in head, changed in remote\\n\",\n                        basedir, head->name);\n\n            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(remote));\n        } else if (base && strcmp (base->id, remote->id) == 0) {\n            seaf_debug (\"%s%s: unchanged in remote, changed in head\\n\",\n                        basedir, head->name);\n\n            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(head));\n        } else {\n            /* File content conflict. */\n\n            seaf_debug (\"%s%s: files conflict\\n\", basedir, head->name);\n\n            conflict_name = merge_conflict_filename(store_id, version,\n                                                    opt,\n                                                    basedir,\n                                                    head->name);\n            if (!conflict_name)\n                return -1;\n\n            /* Change remote entry name in place. 
So opt->callback\n             * will see the conflict name, not the original name.\n             */\n            g_free (remote->name);\n            remote->name = conflict_name;\n            remote->name_len = strlen (remote->name);\n\n            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(head));\n            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(remote));\n\n            opt->conflict = TRUE;\n        }\n    } else if (base && !head && remote) {\n        if (strcmp (base->id, remote->id) != 0) {\n            if (dents[1] != NULL) {\n                /* D/F conflict:\n                 * Head replaces file with dir, while remote change the file.\n                 */\n                seaf_debug (\"%s%s: DFC, file -> dir, file\\n\",\n                            basedir, remote->name);\n\n                conflict_name = merge_conflict_filename(store_id, version,\n                                                        opt,\n                                                        basedir,\n                                                        remote->name);\n                if (!conflict_name)\n                    return -1;\n\n                /* Change the name of remote, keep dir name in head unchanged. \n                 */\n                g_free (remote->name);\n                remote->name = conflict_name;\n                remote->name_len = strlen (remote->name);\n\n                *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(remote));\n\n                opt->conflict = TRUE;\n            } else {\n                /* Deleted in head and changed in remote. */\n\n                seaf_debug (\"%s%s: deleted in head and changed in remote\\n\",\n                            basedir, remote->name);\n\n                /* Keep version of remote. 
*/\n                *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(remote));\n            }\n        } else {\n            /* If base and remote match, the file should not be added to\n             * the merge result.\n             */\n            seaf_debug (\"%s%s: file deleted in head, unchanged in remote\\n\",\n                        basedir, remote->name);\n        }\n    } else if (base && head && !remote) {\n        if (strcmp (base->id, head->id) != 0) {\n            if (dents[2] != NULL) {\n                /* D/F conflict:\n                 * Remote replaces file with dir, while head change the file.\n                 */\n                seaf_debug (\"%s%s: DFC, file -> file, dir\\n\",\n                            basedir, head->name);\n\n                /* We use remote head commit author name as conflict\n                 * suffix of a dir.\n                 */\n                conflict_name = merge_conflict_dirname (store_id, version,\n                                                        opt,\n                                                        basedir, dents[2]->name);\n                if (!conflict_name)\n                    return -1;\n\n                /* Change remote dir name to conflict name in place. */\n                g_free (dents[2]->name);\n                dents[2]->name = conflict_name;\n                dents[2]->name_len = strlen (dents[2]->name);\n\n                *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(head));\n\n                opt->conflict = TRUE;\n            } else {\n                /* Deleted in remote and changed in head. */\n\n                seaf_debug (\"%s%s: deleted in remote and changed in head\\n\",\n                            basedir, head->name);\n\n                /* Keep version of remote. 
*/\n                *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(head));\n            }\n        } else {\n            /* If base and head match, the file should not be added to\n             * the merge result.\n             */\n            seaf_debug (\"%s%s: file deleted in remote, unchanged in head\\n\",\n                        basedir, head->name);\n        }\n    } else if (!base && !head && remote) {\n        if (!dents[1]) {\n            /* Added in remote. */\n            seaf_debug (\"%s%s: added in remote\\n\", basedir, remote->name);\n\n            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(remote));\n        } else if (dents[0] != NULL && strcmp(dents[0]->id, dents[1]->id) == 0) {\n            /* Contents in the dir is not changed.\n             * The dir will be deleted in merge_directories().\n             */\n            seaf_debug (\"%s%s: dir in head will be replaced by file in remote\\n\",\n                        basedir, remote->name);\n\n            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(remote));\n        } else {\n            /* D/F conflict:\n             * Contents of the dir is changed in head, while\n             * remote replace the dir with a file.\n             *\n             * Or, head adds a new dir, while remote adds a new file,\n             * with the same name.\n             */\n\n            seaf_debug (\"%s%s: DFC, dir -> dir, file\\n\", basedir, remote->name);\n\n            conflict_name = merge_conflict_filename(store_id, version,\n                                                    opt,\n                                                    basedir,\n                                                    remote->name);\n            if (!conflict_name)\n                return -1;\n\n            g_free (remote->name);\n            remote->name = conflict_name;\n            remote->name_len = strlen (remote->name);\n\n            *dents_out = g_list_prepend (*dents_out, 
seaf_dirent_dup(remote));\n\n            opt->conflict = TRUE;\n        }\n    } else if (!base && head && !remote) {\n        if (!dents[2]) {\n            /* Added in remote. */\n            seaf_debug (\"%s%s: added in head\\n\", basedir, head->name);\n\n            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(head));\n        } else if (dents[0] != NULL && strcmp(dents[0]->id, dents[2]->id) == 0) {\n            /* Contents in the dir is not changed.\n             * The dir will be deleted in merge_directories().\n             */\n            seaf_debug (\"%s%s: dir in remote will be replaced by file in head\\n\",\n                        basedir, head->name);\n\n            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(head));\n        } else {\n            /* D/F conflict:\n             * Contents of the dir is changed in remote, while\n             * head replace the dir with a file.\n             *\n             * Or, remote adds a new dir, while head adds a new file,\n             * with the same name.\n             */\n\n            seaf_debug (\"%s%s: DFC, dir -> file, dir\\n\", basedir, head->name);\n\n            conflict_name = merge_conflict_dirname (store_id, version,\n                                                    opt,\n                                                    basedir, dents[2]->name);\n            if (!conflict_name)\n                return -1;\n\n            g_free (dents[2]->name);\n            dents[2]->name = conflict_name;\n            dents[2]->name_len = strlen (dents[2]->name);\n\n            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(head));\n\n            opt->conflict = TRUE;\n        }\n    } else if (base && !head && !remote) {\n        /* Don't need to add anything to dents_out. 
*/\n        seaf_debug (\"%s%s: deleted in head and remote\\n\", basedir, base->name);\n    }\n\n    return 0;\n}\n\nstatic int\nmerge_entries (const char *store_id, int version,\n               int n, SeafDirent *dents[],\n               const char *basedir,\n               GList **dents_out,\n               MergeOptions *opt)\n{\n    /* If we're running 2-way merge, it means merge files base on head and remote.\n     */\n    if (n == 2)\n        return twoway_merge (store_id, version, basedir, dents, dents_out, opt);\n\n    /* Otherwise, we're doing a real 3-way merge of the trees.\n     * It means merge files and handle any conflicts.\n     */\n\n    return threeway_merge (store_id, version, dents, basedir, dents_out, opt);\n}\n\nstatic int\nmerge_directories (const char *store_id, int version,\n                   int n, SeafDirent *dents[],\n                   const char *basedir,\n                   GList **dents_out,\n                   MergeOptions *opt)\n{\n    SeafDir *dir;\n    SeafDir *sub_dirs[3];\n    char *dirname = NULL;\n    char *new_basedir;\n    int ret = 0;\n    int dir_mask = 0, i;\n    SeafDirent *merged_dent;\n\n    for (i = 0; i < n; ++i) {\n        if (dents[i] && S_ISDIR(dents[i]->mode))\n            dir_mask |= 1 << i;\n    }\n\n    seaf_debug (\"dir_mask = %d\\n\", dir_mask);\n\n    if (n == 3) {\n        switch (dir_mask) {\n        case 0:\n            g_return_val_if_reached (-1);\n        case 1:\n            /* head and remote are not dirs, nothing to merge. */\n            seaf_debug (\"%s%s: no dir, no need to merge\\n\", basedir, dents[0]->name);\n            return 0;\n        case 2:\n            /* only head is dir, add to result directly, no need to merge. 
*/\n            seaf_debug (\"%s%s: only head is dir\\n\", basedir, dents[1]->name);\n            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(dents[1]));\n            return 0;\n        case 3:\n            if (strcmp (dents[0]->id, dents[1]->id) == 0) {\n                /* Base and head are the same, but deleted in remote. */\n                seaf_debug (\"%s%s: dir deleted in remote\\n\", basedir, dents[0]->name);\n                return 0;\n            }\n            seaf_debug (\"%s%s: dir changed in head but deleted in remote\\n\",\n                        basedir, dents[1]->name);\n            break;\n        case 4:\n            /* only remote is dir, add to result directly, no need to merge. */\n            seaf_debug (\"%s%s: only remote is dir\\n\", basedir, dents[2]->name);\n            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(dents[2]));\n            return 0;\n        case 5:\n            if (strcmp (dents[0]->id, dents[2]->id) == 0) {\n                /* Base and remote are the same, but deleted in head. */\n                seaf_debug (\"%s%s: dir deleted in head\\n\", basedir, dents[0]->name);\n                return 0;\n            }\n            seaf_debug (\"%s%s: dir changed in remote but deleted in head\\n\",\n                        basedir, dents[2]->name);\n            break;\n        case 6:\n        case 7:\n            if (strcmp (dents[1]->id, dents[2]->id) == 0) {\n                /* Head and remote match. 
*/\n                seaf_debug (\"%s%s: dir is the same in head and remote\\n\",\n                            basedir, dents[1]->name);\n                *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(dents[1]));\n                return 0;\n            } else if (dents[0] && strcmp(dents[0]->id, dents[1]->id) == 0) {\n                seaf_debug (\"%s%s: dir changed in remote but unchanged in head\\n\",\n                            basedir, dents[1]->name);\n                *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(dents[2]));\n                return 0;\n            } else if (dents[0] && strcmp(dents[0]->id, dents[2]->id) == 0) {\n                seaf_debug (\"%s%s: dir changed in head but unchanged in remote\\n\",\n                            basedir, dents[1]->name);\n                *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(dents[1]));\n                return 0;\n            }\n\n            seaf_debug (\"%s%s: dir is changed in both head and remote, \"\n                        \"merge recursively\\n\", basedir, dents[1]->name);\n            break;\n        default:\n            g_return_val_if_reached (-1);\n        }\n    } else if (n == 2) {\n        switch (dir_mask) {\n        case 0:\n            g_return_val_if_reached (-1);\n        case 1:\n            /*head is dir, remote is not dir*/\n            seaf_debug (\"%s%s: only head is dir\\n\", basedir, dents[0]->name);\n            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(dents[0]));\n            return 0;\n        case 2:\n            /*head is not dir, remote is dir*/\n            seaf_debug (\"%s%s: only remote is dir\\n\", basedir, dents[1]->name);\n            *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(dents[1]));\n            return 0;\n        case 3:\n            if (strcmp (dents[0]->id, dents[1]->id) == 0) {\n                seaf_debug (\"%s%s: dir is the same in head and remote\\n\",\n                            basedir, 
dents[0]->name);\n                *dents_out = g_list_prepend (*dents_out, seaf_dirent_dup(dents[1]));\n                return 0;\n            }\n            seaf_debug (\"%s%s: dir is changed in head and remote, merge recursively\\n\",\n                        basedir, dents[0]->name);\n            break;\n        default:\n            g_return_val_if_reached (-1);\n        }\n    }\n\n    memset (sub_dirs, 0, sizeof(sub_dirs[0])*n);\n    for (i = 0; i < n; ++i) {\n        if (dents[i] != NULL && S_ISDIR(dents[i]->mode)) {\n            dir = seaf_fs_manager_get_seafdir (seaf->fs_mgr,\n                                               store_id, version,\n                                               dents[i]->id);\n            if (!dir) {\n                seaf_warning (\"Failed to find dir %s:%s.\\n\", store_id, dents[i]->id);\n                ret = -1;\n                goto free_sub_dirs;\n            }\n            opt->visit_dirs++;\n            sub_dirs[i] = dir;\n\n            dirname = dents[i]->name;\n        }\n    }\n\n    new_basedir = g_strconcat (basedir, dirname, \"/\", NULL);\n\n    ret = merge_trees_recursive (store_id, version, n, sub_dirs, new_basedir, opt);\n\n    g_free (new_basedir);\n\n    if (n == 3) {\n        if (dir_mask == 3 || dir_mask == 6 || dir_mask == 7) {\n            merged_dent = seaf_dirent_dup (dents[1]);\n            memcpy (merged_dent->id, opt->merged_tree_root, 40);\n            *dents_out = g_list_prepend (*dents_out, merged_dent);\n        } else if (dir_mask == 5) {\n            merged_dent = seaf_dirent_dup (dents[2]);\n            memcpy (merged_dent->id, opt->merged_tree_root, 40);\n            *dents_out = g_list_prepend (*dents_out, merged_dent);\n        }\n    } else if (n == 2) {\n        if (dir_mask == 3) {\n            merged_dent = seaf_dirent_dup (dents[1]);\n            memcpy (merged_dent->id, opt->merged_tree_root, 40);\n            *dents_out = g_list_prepend (*dents_out, merged_dent);\n        }\n    
}\n\nfree_sub_dirs:\n    for (i = 0; i < n; ++i)\n        seaf_dir_free (sub_dirs[i]);\n\n    return ret;\n}\n\nstatic gint\ncompare_dirents (gconstpointer a, gconstpointer b)\n{\n    const SeafDirent *denta = a, *dentb = b;\n\n    return strcmp (dentb->name, denta->name);\n}\n\nstatic int\nmerge_trees_recursive (const char *store_id, int version,\n                       int n, SeafDir *trees[],\n                       const char *basedir,\n                       MergeOptions *opt)\n{\n    GList *ptrs[3];\n    SeafDirent *dents[3];\n    int i;\n    SeafDirent *dent;\n    char *first_name;\n    gboolean done;\n    int ret = 0;\n    SeafDir *merged_tree;\n    GList *merged_dents = NULL;\n\n    for (i = 0; i < n; ++i) {\n        if (trees[i])\n            ptrs[i] = trees[i]->entries;\n        else\n            ptrs[i] = NULL;\n    }\n\n    while (1) {\n        first_name = NULL;\n        memset (dents, 0, sizeof(dents[0])*n);\n        done = TRUE;\n\n        /* Find the \"largest\" name, assuming dirents are sorted. 
*/\n        for (i = 0; i < n; ++i) {\n            if (ptrs[i] != NULL) {\n                done = FALSE;\n                dent = ptrs[i]->data;\n                if (!first_name)\n                    first_name = dent->name;\n                else if (strcmp(dent->name, first_name) > 0)\n                    first_name = dent->name;\n            }\n        }\n\n        if (done)\n            break;\n\n        /*\n         * Setup dir entries for all names that equal to first_name\n         */\n        int n_files = 0, n_dirs = 0;\n        for (i = 0; i < n; ++i) {\n            if (ptrs[i] != NULL) {\n                dent = ptrs[i]->data;\n                if (strcmp(first_name, dent->name) == 0) {\n                    if (S_ISREG(dent->mode))\n                        ++n_files;\n                    else if (S_ISDIR(dent->mode))\n                        ++n_dirs;\n\n                    dents[i] = dent;\n                    ptrs[i] = ptrs[i]->next;\n                }\n            }\n        }\n\n        /* Merge entries of this level. */\n        if (n_files > 0) {\n            ret = merge_entries (store_id, version,\n                                 n, dents, basedir, &merged_dents, opt);\n            if (ret < 0)\n                return ret;\n        }\n\n        /* Recurse into sub level. 
*/\n        if (n_dirs > 0) {\n            ret = merge_directories (store_id, version,\n                                     n, dents, basedir, &merged_dents, opt);\n            if (ret < 0)\n                return ret;\n        }\n    }\n\n    if (n == 3) {\n        merged_dents = g_list_sort (merged_dents, compare_dirents);\n        merged_tree = seaf_dir_new (NULL, merged_dents,\n                                    dir_version_from_repo_version(version));\n\n        memcpy (opt->merged_tree_root, merged_tree->dir_id, 40);\n\n        if ((trees[1] && strcmp (trees[1]->dir_id, merged_tree->dir_id) == 0) ||\n            (trees[2] && strcmp (trees[2]->dir_id, merged_tree->dir_id) == 0)) {\n            seaf_dir_free (merged_tree);\n        } else {\n            ret = seaf_dir_save (seaf->fs_mgr, store_id, version, merged_tree);\n            seaf_dir_free (merged_tree);\n            if (ret < 0) {\n                seaf_warning (\"Failed to save merged tree %s:%s.\\n\", store_id, basedir);\n            }\n        }\n    } else if (n == 2) {\n        merged_dents = g_list_sort (merged_dents, compare_dirents);\n        merged_tree = seaf_dir_new (NULL, merged_dents,\n                                    dir_version_from_repo_version(version));\n\n        memcpy (opt->merged_tree_root, merged_tree->dir_id, 40);\n\n        if ((trees[0] && strcmp (trees[0]->dir_id, merged_tree->dir_id) == 0) || \n            (trees[1] && strcmp (trees[1]->dir_id, merged_tree->dir_id) == 0)) {\n            seaf_dir_free (merged_tree);\n        } else {\n            ret = seaf_dir_save (seaf->fs_mgr, store_id, version, merged_tree);\n            seaf_dir_free (merged_tree);\n            if (ret < 0) {\n                seaf_warning (\"Failed to save merged tree %s:%s.\\n\", store_id, basedir);\n            }\n        }\n    }\n\n    return ret;\n}\n\nint\nseaf_merge_trees (const char *store_id, int version,\n                  int n, const char *roots[], MergeOptions *opt)\n{\n    SeafDir 
**trees, *root;\n    int i, ret;\n\n    g_return_val_if_fail (n == 2 || n == 3, -1);\n\n    opt->email_to_nickname = g_hash_table_new_full(g_str_hash,\n                                                   g_str_equal,\n                                                   g_free,\n                                                   g_free);\n\n    trees = g_new0 (SeafDir *, n);\n    for (i = 0; i < n; ++i) {\n        root = seaf_fs_manager_get_seafdir (seaf->fs_mgr, store_id, version, roots[i]);\n        if (!root) {\n            seaf_warning (\"Failed to find dir %s:%s.\\n\", store_id, roots[i]);\n            g_free (trees);\n            return -1;\n        }\n        trees[i] = root;\n    }\n\n    ret = merge_trees_recursive (store_id, version, n, trees, \"\", opt);\n\n    for (i = 0; i < n; ++i)\n        seaf_dir_free (trees[i]);\n    g_free (trees);\n\n    g_hash_table_destroy (opt->email_to_nickname);\n\n    return ret;\n}\n"
  },
  {
    "path": "common/merge-new.h",
    "content": "#ifndef MERGE_NEW_H\n#define MERGE_NEW_H\n\n#include \"common.h\"\n\n#include \"fs-mgr.h\"\n\nstruct MergeOptions;\n\ntypedef int (*MergeCallback) (const char *basedir,\n                              SeafDirent *dirents[],\n                              struct MergeOptions *opt);\n\ntypedef struct MergeOptions {\n    int                 n_ways; /* only 2 and 3 way merges are supported. */\n\n    MergeCallback       callback;\n    void *              data;\n\n    /* options only used in 3-way merge. */\n    char                remote_repo_id[37];\n    char                remote_head[41];\n    gboolean            do_merge;    /* really merge the contents\n                                      * and handle conflicts */\n    char                merged_tree_root[41]; /* merge result */\n    int                 visit_dirs;\n    gboolean            conflict;\n\n    GHashTable          *email_to_nickname;\n} MergeOptions;\n\nint\nseaf_merge_trees (const char *store_id, int version,\n                  int n, const char *roots[], MergeOptions *opt);\n\n#endif\n"
  },
  {
    "path": "common/mq-mgr.c",
    "content": "#include \"common.h\"\n#include \"log.h\"\n#include \"utils.h\"\n#include \"mq-mgr.h\"\n\ntypedef struct SeafMqManagerPriv {\n    // chan <-> async_queue\n    GHashTable *chans;\n} SeafMqManagerPriv;\n\nSeafMqManager *\nseaf_mq_manager_new ()\n{\n    SeafMqManager *mgr = g_new0 (SeafMqManager, 1);\n    mgr->priv = g_new0 (SeafMqManagerPriv, 1);\n    mgr->priv->chans = g_hash_table_new_full (g_str_hash, g_str_equal,\n                                              (GDestroyNotify)g_free,\n                                              (GDestroyNotify)g_async_queue_unref);\n\n    return mgr;\n}\n\nstatic GAsyncQueue *\nseaf_mq_manager_channel_new (SeafMqManager *mgr, const char *channel)\n{\n    GAsyncQueue *async_queue = NULL;\n    async_queue = g_async_queue_new_full ((GDestroyNotify)json_decref);\n\n    g_hash_table_replace (mgr->priv->chans, g_strdup (channel), async_queue);\n\n    return async_queue;\n}\n\nint\nseaf_mq_manager_publish_event (SeafMqManager *mgr, const char *channel, const char *content)\n{\n    int ret = 0;\n\n    if (!channel || !content) {\n        seaf_warning (\"type and content should not be NULL.\\n\");\n        return -1;\n    }\n\n    GAsyncQueue *async_queue = g_hash_table_lookup (mgr->priv->chans, channel);\n    if (!async_queue) {\n        async_queue = seaf_mq_manager_channel_new(mgr, channel);\n    }\n\n    if (!async_queue) {\n        seaf_warning(\"%s channel creation failed.\\n\", channel);\n        return -1;\n    }\n\n    json_t *msg = json_object();\n    json_object_set_new (msg, \"content\", json_string(content));\n    json_object_set_new (msg, \"ctime\", json_integer(time(NULL)));\n    g_async_queue_push (async_queue, msg);\n\n    return ret;\n}\n\njson_t *\nseaf_mq_manager_pop_event (SeafMqManager *mgr, const char *channel)\n{\n    GAsyncQueue *async_queue = g_hash_table_lookup (mgr->priv->chans, channel);\n    if (!async_queue)\n        return NULL;\n\n    return g_async_queue_try_pop (async_queue);\n}\n"
  },
  {
    "path": "common/mq-mgr.h",
    "content": "#ifndef SEAF_MQ_MANAGER_H\n#define SEAF_MQ_MANAGER_H\n\n#include <jansson.h>\n\n#define SEAFILE_SERVER_CHANNEL_EVENT \"seaf_server.event\"\n#define SEAFILE_SERVER_CHANNEL_STATS \"seaf_server.stats\"\n\nstruct SeafMqManagerPriv;\n\ntypedef struct SeafMqManager {\n    struct SeafMqManagerPriv *priv;\n} SeafMqManager;\n\nSeafMqManager *\nseaf_mq_manager_new ();\n\nint\nseaf_mq_manager_publish_event (SeafMqManager *mgr, const char *channel, const char *content);\n\njson_t *\nseaf_mq_manager_pop_event (SeafMqManager *mgr, const char *channel);\n\n#endif\n"
  },
  {
    "path": "common/obj-backend-fs.c",
    "content": "#ifndef _WIN32_WINNT\n#define _WIN32_WINNT 0x500\n#endif\n\n#include \"common.h\"\n#include \"utils.h\"\n#include \"obj-backend.h\"\n\n#ifndef WIN32\n#include <sys/types.h>\n#include <sys/stat.h>\n#include <fcntl.h>\n#endif\n\n#ifdef WIN32\n#include <windows.h>\n#include <io.h>\n#endif\n\n#define DEBUG_FLAG SEAFILE_DEBUG_OTHER\n#include \"log.h\"\n\ntypedef struct FsPriv {\n    char *obj_dir;\n    int   dir_len;\n} FsPriv;\n\nstatic void\nid_to_path (FsPriv *priv, const char *obj_id, char path[],\n            const char *repo_id, int version)\n{\n    char *pos = path;\n    int n;\n\n#if defined MIGRATION || defined SEAFILE_CLIENT\n    if (version > 0) {\n        n = snprintf (path, SEAF_PATH_MAX, \"%s/%s/\", priv->obj_dir, repo_id);\n        pos += n;\n    }\n#else\n    n = snprintf (path, SEAF_PATH_MAX, \"%s/%s/\", priv->obj_dir, repo_id);\n    pos += n;\n#endif\n\n    memcpy (pos, obj_id, 2);\n    pos[2] = '/';\n    pos += 3;\n\n    memcpy (pos, obj_id + 2, 41 - 2);\n}\n\nstatic int\nobj_backend_fs_read (ObjBackend *bend,\n                     const char *repo_id,\n                     int version,\n                     const char *obj_id,\n                     void **data,\n                     int *len)\n{\n    char path[SEAF_PATH_MAX];\n    gsize tmp_len;\n    GError *error = NULL;\n\n    id_to_path (bend->priv, obj_id, path, repo_id, version);\n\n    /* seaf_debug (\"object path: %s\\n\", path); */\n\n    g_file_get_contents (path, (gchar**)data, &tmp_len, &error);\n    if (error) {\n#ifdef MIGRATION\n        g_clear_error (&error);\n        id_to_path (bend->priv, obj_id, path, repo_id, 1);\n        g_file_get_contents (path, (gchar**)data, &tmp_len, &error);\n        if (error) {\n            seaf_debug (\"[obj backend] Failed to read object %s: %s.\\n\",\n                        obj_id, error->message);\n            g_clear_error (&error);\n            return -1;\n        }\n#else\n        seaf_debug (\"[obj backend] Failed to read object 
%s: %s.\\n\",\n                    obj_id, error->message);\n        g_clear_error (&error);\n        return -1;\n#endif\n    }\n\n    *len = (int)tmp_len;\n    return 0;\n}\n\n/*\n * Flush operating system and disk caches for @fd.\n */\nstatic int\nfsync_obj_contents (int fd)\n{\n#ifdef __linux__\n    /* Some file systems may not support fsync().\n     * In this case, just skip the error.\n     */\n    if (fsync (fd) < 0) {\n        if (errno == EINVAL)\n            return 0;\n        else {\n            seaf_warning (\"Failed to fsync: %s.\\n\", strerror(errno));\n            return -1;\n        }\n    }\n    return 0;\n#endif\n\n#ifdef __APPLE__\n    /* OS X: fcntl() is required to flush disk cache, fsync() only\n     * flushes operating system cache.\n     */\n    if (fcntl (fd, F_FULLFSYNC, NULL) < 0) {\n        seaf_warning (\"Failed to fsync: %s.\\n\", strerror(errno));\n        return -1;\n    }\n    return 0;\n#endif\n\n#ifdef WIN32\n    HANDLE handle;\n\n    handle = (HANDLE)_get_osfhandle (fd);\n    if (handle == INVALID_HANDLE_VALUE) {\n        seaf_warning (\"Failed to get handle from fd.\\n\");\n        return -1;\n    }\n\n    if (!FlushFileBuffers (handle)) {\n        seaf_warning (\"FlushFileBuffer() failed: %lu.\\n\", GetLastError());\n        return -1;\n    }\n\n    return 0;\n#endif\n}\n\n/*\n * Rename file from @tmp_path to @obj_path.\n * This also makes sure the changes to @obj_path's parent folder\n * is flushed to disk.\n */\nstatic int\nrename_and_sync (const char *tmp_path, const char *obj_path)\n{\n#ifdef __linux__\n    char *parent_dir;\n    int ret = 0;\n\n    if (rename (tmp_path, obj_path) < 0) {\n        seaf_warning (\"Failed to rename from %s to %s: %s.\\n\",\n                      tmp_path, obj_path, strerror(errno));\n        return -1;\n    }\n\n    parent_dir = g_path_get_dirname (obj_path);\n    int dir_fd = open (parent_dir, O_RDONLY);\n    if (dir_fd < 0) {\n        seaf_warning (\"Failed to open dir %s: %s.\\n\", 
parent_dir, strerror(errno));\n        goto out;\n    }\n\n    /* Some file systems don't support fsyncing a directory. Just ignore the error.\n     */\n    if (fsync (dir_fd) < 0) {\n        if (errno != EINVAL) {\n            seaf_warning (\"Failed to fsync dir %s: %s.\\n\",\n                          parent_dir, strerror(errno));\n            ret = -1;\n        }\n        goto out;\n    }\n\nout:\n    g_free (parent_dir);\n    if (dir_fd >= 0)\n        close (dir_fd);\n    return ret;\n#endif\n\n#ifdef __APPLE__\n    /*\n     * OS X guarantees that obj_path always exists,\n     * even when the system crashes.\n     */\n    if (rename (tmp_path, obj_path) < 0) {\n        seaf_warning (\"Failed to rename from %s to %s: %s.\\n\",\n                      tmp_path, obj_path, strerror(errno));\n        return -1;\n    }\n    return 0;\n#endif\n\n#ifdef WIN32\n    wchar_t *w_tmp_path = g_utf8_to_utf16 (tmp_path, -1, NULL, NULL, NULL);\n    wchar_t *w_obj_path = g_utf8_to_utf16 (obj_path, -1, NULL, NULL, NULL);\n    int ret = 0;\n\n    if (!MoveFileExW (w_tmp_path, w_obj_path,\n                      MOVEFILE_REPLACE_EXISTING | MOVEFILE_WRITE_THROUGH)) {\n        seaf_warning (\"MoveFileExW failed: %lu.\\n\", GetLastError());\n        ret = -1;\n        goto out;\n    }\n\nout:\n    g_free (w_tmp_path);\n    g_free (w_obj_path);\n    return ret;\n#endif\n}\n\nstatic int\nsave_obj_contents (const char *path, const void *data, int len, gboolean need_sync)\n{\n    char tmp_path[SEAF_PATH_MAX];\n    int fd;\n\n    snprintf (tmp_path, SEAF_PATH_MAX, \"%s.XXXXXX\", path);\n    fd = g_mkstemp (tmp_path);\n    if (fd < 0) {\n        seaf_warning (\"[obj backend] Failed to open tmp file %s: %s.\\n\",\n                      tmp_path, strerror(errno));\n        return -1;\n    }\n\n    if (writen (fd, data, len) < 0) {\n        seaf_warning (\"[obj backend] Failed to write obj %s: %s.\\n\",\n                      tmp_path, strerror(errno));\n        return -1;\n    }\n\n    
if (need_sync && fsync_obj_contents (fd) < 0)\n        return -1;\n\n    /* Close may return error, especially in NFS. */\n    if (close (fd) < 0) {\n        seaf_warning (\"[obj backend] Failed to close obj %s: %s.\\n\",\n                      tmp_path, strerror(errno));\n        return -1;\n    }\n\n    if (need_sync) {\n        if (rename_and_sync (tmp_path, path) < 0)\n            return -1;\n    } else {\n        if (g_rename (tmp_path, path) < 0) {\n            seaf_warning (\"[obj backend] Failed to rename %s: %s.\\n\",\n                          path, strerror(errno));\n            return -1;\n        }\n    }\n\n    return 0;\n}\n\nstatic int\ncreate_parent_path (const char *path)\n{\n    char *dir = g_path_get_dirname (path);\n    if (!dir)\n        return -1;\n\n    if (g_file_test (dir, G_FILE_TEST_EXISTS)) {\n        g_free (dir);\n        return 0;\n    }\n\n    if (g_mkdir_with_parents (dir, 0777) < 0) {\n        seaf_warning (\"Failed to create object parent path %s: %s.\\n\",\n                      dir, strerror(errno));\n        g_free (dir);\n        return -1;\n    }\n\n    g_free (dir);\n    return 0;\n}\n\nstatic int\nobj_backend_fs_write (ObjBackend *bend,\n                      const char *repo_id,\n                      int version,\n                      const char *obj_id,\n                      void *data,\n                      int len,\n                      gboolean need_sync)\n{\n    char path[SEAF_PATH_MAX];\n\n    id_to_path (bend->priv, obj_id, path, repo_id, version);\n\n    /* GTimeVal s, e; */\n\n    /* g_get_current_time (&s); */\n\n    if (create_parent_path (path) < 0) {\n        seaf_warning (\"[obj backend] Failed to create path for obj %s:%s.\\n\",\n                      repo_id, obj_id);\n        return -1;\n    }\n\n    if (save_obj_contents (path, data, len, need_sync) < 0) {\n        seaf_warning (\"[obj backend] Failed to write obj %s:%s.\\n\",\n                      repo_id, obj_id);\n        return -1;\n    }\n\n    /* 
g_get_current_time (&e); */\n\n    /* seaf_message (\"write obj time: %ldus.\\n\", */\n    /*               ((e.tv_sec*1000000+e.tv_usec) - (s.tv_sec*1000000+s.tv_usec))); */\n\n    return 0;\n}\n\nstatic gboolean\nobj_backend_fs_exists (ObjBackend *bend,\n                       const char *repo_id,\n                       int version,\n                       const char *obj_id)\n{\n    char path[SEAF_PATH_MAX];\n    SeafStat st;\n\n    id_to_path (bend->priv, obj_id, path, repo_id, version);\n\n    if (seaf_stat (path, &st) == 0)\n        return TRUE;\n\n    return FALSE;\n}\n\nstatic void\nobj_backend_fs_delete (ObjBackend *bend,\n                       const char *repo_id,\n                       int version,\n                       const char *obj_id)\n{\n    char path[SEAF_PATH_MAX];\n\n    id_to_path (bend->priv, obj_id, path, repo_id, version);\n    g_unlink (path);\n}\n\nstatic int\nobj_backend_fs_foreach_obj (ObjBackend *bend,\n                            const char *repo_id,\n                            int version,\n                            SeafObjFunc process,\n                            void *user_data)\n{\n    FsPriv *priv = bend->priv;\n    char *obj_dir = NULL;\n    int dir_len;\n    GDir *dir1 = NULL, *dir2;\n    const char *dname1, *dname2;\n    char obj_id[128];\n    char path[SEAF_PATH_MAX], *pos;\n    int ret = 0;\n\n#if defined MIGRATION || defined SEAFILE_CLIENT\n    if (version > 0)\n        obj_dir = g_build_filename (priv->obj_dir, repo_id, NULL);\n#else\n    obj_dir = g_build_filename (priv->obj_dir, repo_id, NULL);\n#endif\n    dir_len = strlen (obj_dir);\n\n    dir1 = g_dir_open (obj_dir, 0, NULL);\n    if (!dir1) {\n        goto out;\n    }\n\n    memcpy (path, obj_dir, dir_len);\n    pos = path + dir_len;\n\n    while ((dname1 = g_dir_read_name(dir1)) != NULL) {\n        snprintf (pos, sizeof(path) - dir_len, \"/%s\", dname1);\n\n        dir2 = g_dir_open (path, 0, NULL);\n        if (!dir2) {\n            seaf_warning (\"Failed 
to open object dir %s.\\n\", path);\n            continue;\n        }\n\n        while ((dname2 = g_dir_read_name(dir2)) != NULL) {\n            snprintf (obj_id, sizeof(obj_id), \"%s%s\", dname1, dname2);\n            if (!process (repo_id, version, obj_id, user_data)) {\n                g_dir_close (dir2);\n                goto out;\n            }\n        }\n        g_dir_close (dir2);\n    }\n\nout:\n    if (dir1)\n        g_dir_close (dir1);\n    g_free (obj_dir);\n\n    return ret;\n}\n\nstatic int\nobj_backend_fs_copy (ObjBackend *bend,\n                     const char *src_repo_id,\n                     int src_version,\n                     const char *dst_repo_id,\n                     int dst_version,\n                     const char *obj_id)\n{\n    char src_path[SEAF_PATH_MAX];\n    char dst_path[SEAF_PATH_MAX];\n\n    id_to_path (bend->priv, obj_id, src_path, src_repo_id, src_version);\n    id_to_path (bend->priv, obj_id, dst_path, dst_repo_id, dst_version);\n\n    if (g_file_test (dst_path, G_FILE_TEST_EXISTS))\n        return 0;\n\n    if (create_parent_path (dst_path) < 0) {\n        seaf_warning (\"Failed to create dst path %s for obj %s.\\n\",\n                      dst_path, obj_id);\n        return -1;\n    }\n\n#ifdef WIN32\n    if (!CreateHardLink (dst_path, src_path, NULL)) {\n        seaf_warning (\"Failed to link %s to %s: %lu.\\n\",\n                      src_path, dst_path, GetLastError());\n        return -1;\n    }\n    return 0;\n#else\n    int ret = link (src_path, dst_path);\n    if (ret < 0 && errno != EEXIST) {\n        seaf_warning (\"Failed to link %s to %s: %s.\\n\",\n                      src_path, dst_path, strerror(errno));\n        return -1;\n    }\n    return ret;\n#endif\n}\n\nstatic int\nobj_backend_fs_remove_store (ObjBackend *bend, const char *store_id)\n{\n    FsPriv *priv = bend->priv;\n    char *obj_dir = NULL;\n    GDir *dir1, *dir2;\n    const char *dname1, *dname2;\n    char *path1, *path2;\n\n    obj_dir = 
g_build_filename (priv->obj_dir, store_id, NULL);\n\n    dir1 = g_dir_open (obj_dir, 0, NULL);\n    if (!dir1) {\n        g_free (obj_dir);\n        return 0;\n    }\n\n    while ((dname1 = g_dir_read_name(dir1)) != NULL) {\n        path1 = g_build_filename (obj_dir, dname1, NULL);\n\n        dir2 = g_dir_open (path1, 0, NULL);\n        if (!dir2) {\n            seaf_warning (\"Failed to open obj dir %s.\\n\", path1);\n            g_dir_close (dir1);\n            g_free (path1);\n            g_free (obj_dir);\n            return -1;\n        }\n\n        while ((dname2 = g_dir_read_name(dir2)) != NULL) {\n            path2 = g_build_filename (path1, dname2, NULL);\n            g_unlink (path2);\n            g_free (path2);\n        }\n        g_dir_close (dir2);\n\n        g_rmdir (path1);\n        g_free (path1);\n    }\n\n    g_dir_close (dir1);\n    g_rmdir (obj_dir);\n    g_free (obj_dir);\n\n    return 0;\n}\n\nObjBackend *\nobj_backend_fs_new (const char *seaf_dir, const char *obj_type)\n{\n    ObjBackend *bend;\n    FsPriv *priv;\n\n    bend = g_new0(ObjBackend, 1);\n    priv = g_new0(FsPriv, 1);\n    bend->priv = priv;\n\n    priv->obj_dir = g_build_filename (seaf_dir, \"storage\", obj_type, NULL);\n    priv->dir_len = strlen (priv->obj_dir);\n\n    if (g_mkdir_with_parents (priv->obj_dir, 0777) < 0) {\n        seaf_warning (\"[Obj Backend] Objects dir %s does not exist and\"\n                   \" is unable to create\\n\", priv->obj_dir);\n        goto onerror;\n    }\n\n    bend->read = obj_backend_fs_read;\n    bend->write = obj_backend_fs_write;\n    bend->exists = obj_backend_fs_exists;\n    bend->delete = obj_backend_fs_delete;\n    bend->foreach_obj = obj_backend_fs_foreach_obj;\n    bend->copy = obj_backend_fs_copy;\n    bend->remove_store = obj_backend_fs_remove_store;\n\n    return bend;\n\nonerror:\n    g_free (priv->obj_dir);\n    g_free (priv);\n    g_free (bend);\n\n    return NULL;\n}\n"
  },
  {
    "path": "common/obj-backend-riak.c",
    "content": "#include \"common.h\"\n#include \"log.h\"\n#include \"obj-backend.h\"\n\n#ifdef RIAK_BACKEND\n\n#include \"riak-client.h\"\n\n#include <pthread.h>\n\ntypedef struct RiakPriv {\n    const char *host;\n    const char *port;\n    const char *bucket;\n    int n_write;\n\n    GQueue *conn_pool;\n    pthread_mutex_t lock;\n} RiakPriv;\n\nstatic SeafRiakClient *\nget_connection (RiakPriv *priv)\n{\n    SeafRiakClient *connection;\n\n    pthread_mutex_lock (&priv->lock);\n\n    connection = g_queue_pop_head (priv->conn_pool);\n    if (!connection)\n        connection = seaf_riak_client_new (priv->host, priv->port);\n    pthread_mutex_unlock (&priv->lock);\n    return connection;\n}\n\nstatic void\nreturn_connection (RiakPriv *priv, SeafRiakClient *connection)\n{\n    pthread_mutex_lock (&priv->lock);\n    g_queue_push_tail (priv->conn_pool, connection);\n    pthread_mutex_unlock (&priv->lock);\n}\n\nstatic int\nobj_backend_riak_read (ObjBackend *bend,\n                       const char *obj_id,\n                       void **data,\n                       int *len)\n{\n    SeafRiakClient *conn = get_connection (bend->priv);\n    RiakPriv *priv = bend->priv;\n    int ret;\n\n    ret = seaf_riak_client_get (conn, priv->bucket, obj_id, data, len);\n\n    return_connection (priv, conn);\n    return ret;\n}\n\nstatic int\nobj_backend_riak_write (ObjBackend *bend,\n                        const char *obj_id,\n                        void *data,\n                        int len)\n{\n    SeafRiakClient *conn = get_connection (bend->priv);\n    RiakPriv *priv = bend->priv;\n    int ret;\n\n    ret = seaf_riak_client_put (conn, priv->bucket, obj_id, data, len,\n                                priv->n_write);\n\n    return_connection (priv, conn);\n    return ret;\n}\n\nstatic gboolean\nobj_backend_riak_exists (ObjBackend *bend,\n                         const char *obj_id)\n{\n    SeafRiakClient *conn = get_connection (bend->priv);\n    RiakPriv *priv = bend->priv;\n  
  gboolean ret;\n\n    ret = seaf_riak_client_query (conn, priv->bucket, obj_id);\n\n    return_connection (priv, conn);\n    return ret;\n}\n\nstatic void\nobj_backend_riak_delete (ObjBackend *bend,\n                         const char *obj_id)\n{\n    SeafRiakClient *conn = get_connection (bend->priv);\n    RiakPriv *priv = bend->priv;\n\n    seaf_riak_client_delete (conn, priv->bucket, obj_id, priv->n_write);\n\n    return_connection (priv, conn);\n}\n\nObjBackend *\nobj_backend_riak_new (const char *host,\n                      const char *port,\n                      const char *bucket,\n                      const char *write_policy)\n{\n    ObjBackend *bend;\n    RiakPriv *priv;\n\n    bend = g_new0(ObjBackend, 1);\n    priv = g_new0(RiakPriv, 1);\n    bend->priv = priv;\n\n    priv->host = g_strdup (host);\n    priv->port = g_strdup (port);\n    priv->bucket = g_strdup (bucket);\n    if (strcmp (write_policy, \"quorum\") == 0)\n        priv->n_write = RIAK_QUORUM;\n    else if (strcmp (write_policy, \"all\") == 0)\n        priv->n_write = RIAK_ALL;\n    else\n        g_return_val_if_reached (NULL);\n\n    priv->conn_pool = g_queue_new ();\n    pthread_mutex_init (&priv->lock, NULL);\n\n    bend->read = obj_backend_riak_read;\n    bend->write = obj_backend_riak_write;\n    bend->exists = obj_backend_riak_exists;\n    bend->delete = obj_backend_riak_delete;\n\n    return bend;\n}\n\n#else\n\nObjBackend *\nobj_backend_riak_new (const char *host,\n                      const char *port,\n                      const char *bucket,\n                      const char *write_policy)\n{\n    seaf_warning (\"Riak backend is not enabled.\\n\");\n    return NULL;\n}\n\n#endif  /* RIAK_BACKEND */\n"
  },
  {
    "path": "common/obj-backend.h",
    "content": "#ifndef OBJ_BACKEND_H\n#define OBJ_BACKEND_H\n\n#include <glib.h>\n#include \"obj-store.h\"\n\ntypedef struct ObjBackend ObjBackend;\n\nstruct ObjBackend {\n    int         (*read) (ObjBackend *bend,\n                         const char *repo_id,\n                         int version,\n                         const char *obj_id,\n                         void **data,\n                         int *len);\n\n    int         (*write) (ObjBackend *bend,\n                          const char *repo_id,\n                          int version,\n                          const char *obj_id,\n                          void *data,\n                          int len,\n                          gboolean need_sync);\n\n    gboolean    (*exists) (ObjBackend *bend,\n                           const char *repo_id,\n                           int version,\n                           const char *obj_id);\n\n    void        (*delete) (ObjBackend *bend,\n                           const char *repo_id,\n                           int version,\n                           const char *obj_id);\n\n    int         (*foreach_obj) (ObjBackend *bend,\n                                const char *repo_id,\n                                int version,\n                                SeafObjFunc process,\n                                void *user_data);\n\n    int         (*copy) (ObjBackend *bend,\n                         const char *src_repo_id,\n                         int src_version,\n                         const char *dst_repo_id,\n                         int dst_version,\n                         const char *obj_id);\n\n    int        (*remove_store) (ObjBackend *bend,\n                                const char *store_id);\n\n    void *priv;\n};\n\n#endif\n"
  },
  {
    "path": "common/obj-cache.c",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#include \"common.h\"\n\n#define DEBUG_FLAG SEAFILE_DEBUG_OTHER\n#include \"log.h\"\n#include \"redis-cache.h\"\n#include \"obj-cache.h\"\n\n#define DEFAULT_MEMCACHED_EXPIRY 24 * 3600\n#define DEFAULT_MAX_CONNECTIONS 100\n\ntypedef struct CacheOption {\n    char *cache_provider;\n    char *redis_host;\n    char *redis_passwd;\n    int redis_port;\n    int redis_max_connections;\n    int redis_expiry;\n} CacheOption;\n\nstatic void\ncache_option_free (CacheOption *option)\n{\n    if (!option)\n        return;\n    g_free (option->cache_provider);\n    g_free (option->redis_host);\n    g_free (option->redis_passwd);\n    g_free (option);\n}\n\nstatic void\nload_cache_option_from_env (CacheOption *option)\n{\n    const char *cache_provider, *redis_host, *redis_port, *redis_passwd, *redis_max_conn, *redis_expiry;\n\n    cache_provider = g_getenv(\"CACHE_PROVIDER\");\n    redis_host = g_getenv(\"REDIS_HOST\");\n    redis_port = g_getenv(\"REDIS_PORT\");\n    redis_passwd = g_getenv(\"REDIS_PASSWORD\");\n    redis_max_conn = g_getenv(\"REDIS_MAX_CONNECTIONS\");\n    redis_expiry = g_getenv(\"REDIS_EXPIRY\");\n\n    if (!cache_provider || g_strcmp0 (cache_provider, \"\") == 0) {\n        return;\n    }\n\n    if (cache_provider) {\n        g_free (option->cache_provider);\n        option->cache_provider = g_strdup (cache_provider);\n    }\n    if (redis_host && g_strcmp0(redis_host, \"\") != 0) {\n        g_free (option->redis_host);\n        option->redis_host = g_strdup (redis_host);\n    }\n    if (redis_port && g_strcmp0(redis_port, \"\") != 0) {\n        option->redis_port = atoi (redis_port);\n    }\n    if (redis_passwd && g_strcmp0 (redis_passwd, \"\") != 0) {\n        g_free (option->redis_passwd);\n        option->redis_passwd = g_strdup (redis_passwd);\n    }\n    if (redis_max_conn && g_strcmp0 (redis_max_conn, \"\") != 0) {\n        option->redis_max_connections = 
atoi (redis_max_conn);\n    }\n    if (redis_expiry && g_strcmp0 (redis_expiry, \"\") != 0) {\n        option->redis_expiry = atoi (redis_expiry);\n    }\n}\n\nObjCache *\nobjcache_new (GKeyFile *config)\n{\n    ObjCache *cache = NULL;\n    GError *error = NULL;\n    CacheOption *option = g_new0 (CacheOption, 1);\n    int redis_port;\n    int redis_expiry;\n    int redis_max_connections;\n\n    redis_expiry = DEFAULT_MEMCACHED_EXPIRY;\n    redis_port = 6379;\n    redis_max_connections = DEFAULT_MAX_CONNECTIONS;\n\n    option->redis_port = redis_port;\n    option->redis_max_connections = redis_max_connections;\n    option->redis_expiry = redis_expiry;\n\n    load_cache_option_from_env (option);\n\n    if (g_strcmp0 (option->cache_provider, \"redis\") == 0) {\n        cache = redis_cache_new (option->redis_host, option->redis_passwd, option->redis_port, option->redis_expiry, option->redis_max_connections);\n    } else if (option->cache_provider){\n        seaf_warning (\"Unsupported cache provider: %s\\n\", option->cache_provider);\n    }\n\n    cache_option_free (option);\n\n    return cache;\n}\n\nvoid *\nobjcache_get_object (ObjCache *cache, const char *obj_id, size_t *len)\n{\n    return cache->get_object (cache, obj_id, len);\n}\n\nint\nobjcache_set_object (ObjCache *cache,\n                    const char *obj_id,\n                    const void *object,\n                    int len,\n                    int expiry)\n{\n    return cache->set_object (cache, obj_id, object, len, expiry);\n}\n\ngboolean\nobjcache_test_object (ObjCache *cache, const char *obj_id)\n{\n    return cache->test_object (cache, obj_id);\n}\n\nint\nobjcache_delete_object (ObjCache *cache, const char *obj_id)\n{\n    return cache->delete_object (cache, obj_id);\n}\n\nint\nobjcache_set_object_existence (ObjCache *cache, const char *obj_id, int val, int expiry, const char *existence_prefix)\n{\n    char *key;\n    char buf[8];\n    int n;\n    int ret;\n\n    key = g_strdup_printf (\"%s%s\", 
existence_prefix, obj_id);\n    n = snprintf (buf, sizeof(buf), \"%d\", val);\n\n    ret = cache->set_object (cache, key, buf, n+1, expiry);\n\n    g_free (key);\n    return ret;\n}\n\nint\nobjcache_get_object_existence (ObjCache *cache, const char *obj_id, int *val_out, const char *existence_prefix)\n{\n    char *key;\n    size_t len;\n    char *val;\n    int ret = 0;\n\n    key = g_strdup_printf (\"%s%s\", existence_prefix, obj_id);\n\n    val = cache->get_object (cache, key, &len);\n    if (!val)\n        ret = -1;\n    else \n        *val_out = atoi(val);\n\n    g_free (key);\n    g_free (val);\n    return ret;\n}\n\nint\nobjcache_delete_object_existence (ObjCache *cache, const char *obj_id, const char *existence_prefix)\n{\n    char *key;\n    int ret;\n\n    key = g_strdup_printf (\"%s%s\", existence_prefix, obj_id);\n\n    ret = cache->delete_object (cache, key);\n\n    g_free (key);\n    return ret;\n}\n\nint\nobjcache_publish (ObjCache *cache, const char *channel, const char *msg)\n{\n    int ret;\n    ret = cache->publish (cache, channel, msg);\n    return ret;\n}\n\nint\nobjcache_push (ObjCache *cache, const char *list, const char *msg)\n{\n    int ret;\n    ret = cache->push (cache, list, msg);\n    return ret;\n}\n"
  },
  {
    "path": "common/obj-cache.h",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#ifndef OBJ_CACHE_H\n#define OBJ_CACHE_H\n\n#define DEFAULT_MEMCACHED_EXPIRY 24 * 3600\n\n#define TYPE_REDIS 0x02\n\ntypedef struct ObjCache ObjCache;\n\nstruct ObjCache {\n    void*       (*get_object) (ObjCache *cache,\n                               const char *obj_id,\n                               size_t *len);\n\n    int         (*set_object) (ObjCache *cache,\n                               const char *obj_id,\n                               const void *object,\n                               int len,\n                               int expiry);\n\n    gboolean    (*test_object) (ObjCache *cache,\n                                const char *obj_id);\n\n    int         (*delete_object) (ObjCache *cache,\n                                  const char *obj_id);\n\n    int         (*publish) (ObjCache *cache,\n                            const char *channel,\n                            const char *msg);\n\n    int         (*push) (ObjCache *cache,\n                            const char *list,\n                            const char *msg);\n\n    int mc_expiry;\n    char *host;\n    int port;\n    char cache_type;\n\n    void *priv;\n};\n\nObjCache *\nobjcache_new (GKeyFile *config);\n\nvoid *\nobjcache_get_object (struct ObjCache *cache, const char *obj_id, size_t *len);\n\nint\nobjcache_set_object (struct ObjCache *cache,\n                    const char *obj_id,\n                    const void *object,\n                    int len,\n                    int expiry);\n\ngboolean\nobjcache_test_object (struct ObjCache *cache, const char *obj_id);\n\nint\nobjcache_delete_object (struct ObjCache *cache, const char *obj_id);\n\nint\nobjcache_set_object_existence (struct ObjCache *cache, const char *obj_id, int val, int expiry, const char *existence_prefix);\n\nint\nobjcache_get_object_existence (struct ObjCache *cache, const char *obj_id, int *val_out, const char 
*existence_prefix);\n\nint\nobjcache_delete_object_existence (struct ObjCache *cache, const char *obj_id, const char *existence_prefix);\n\nint\nobjcache_publish (ObjCache *cache, const char *channel, const char *msg);\n\nint\nobjcache_push (ObjCache *cache, const char *list, const char *msg);\n\n#endif\n"
  },
  {
    "path": "common/obj-store.c",
    "content": "#include \"common.h\"\n\n#include \"log.h\"\n\n#include \"seafile-session.h\"\n\n#include \"utils.h\"\n\n#include \"obj-backend.h\"\n#include \"obj-store.h\"\n\nstruct SeafObjStore {\n    ObjBackend   *bend;\n};\ntypedef struct SeafObjStore SeafObjStore;\n\nextern ObjBackend *\nobj_backend_fs_new (const char *seaf_dir, const char *obj_type);\n\nstruct SeafObjStore *\nseaf_obj_store_new (SeafileSession *seaf, const char *obj_type)\n{\n    SeafObjStore *store = g_new0 (SeafObjStore, 1);\n\n    if (!store)\n        return NULL;\n\n    store->bend = obj_backend_fs_new (seaf->seaf_dir, obj_type);\n    if (!store->bend) {\n        seaf_warning (\"[Object store] Failed to load backend.\\n\");\n        g_free (store);\n        return NULL;\n    }\n\n    return store;\n}\n\nint\nseaf_obj_store_init (SeafObjStore *obj_store)\n{\n    return 0;\n}\n\nint\nseaf_obj_store_read_obj (struct SeafObjStore *obj_store,\n                         const char *repo_id,\n                         int version,\n                         const char *obj_id,\n                         void **data,\n                         int *len)\n{\n    ObjBackend *bend = obj_store->bend;\n\n    if (!repo_id || !is_uuid_valid(repo_id) ||\n        !obj_id || !is_object_id_valid(obj_id))\n        return -1;\n\n    return bend->read (bend, repo_id, version, obj_id, data, len);\n}\n\nint\nseaf_obj_store_write_obj (struct SeafObjStore *obj_store,\n                          const char *repo_id,\n                          int version,\n                          const char *obj_id,\n                          void *data,\n                          int len,\n                          gboolean need_sync)\n{\n    ObjBackend *bend = obj_store->bend;\n\n    if (!repo_id || !is_uuid_valid(repo_id) ||\n        !obj_id || !is_object_id_valid(obj_id))\n        return -1;\n\n    return bend->write (bend, repo_id, version, obj_id, data, len, need_sync);\n}\n\ngboolean\nseaf_obj_store_obj_exists (struct 
SeafObjStore *obj_store,\n                           const char *repo_id,\n                           int version,\n                           const char *obj_id)\n{\n    ObjBackend *bend = obj_store->bend;\n\n    if (!repo_id || !is_uuid_valid(repo_id) ||\n        !obj_id || !is_object_id_valid(obj_id))\n        return FALSE;\n\n    return bend->exists (bend, repo_id, version, obj_id);\n}\n\nvoid\nseaf_obj_store_delete_obj (struct SeafObjStore *obj_store,\n                           const char *repo_id,\n                           int version,\n                           const char *obj_id)\n{\n    ObjBackend *bend = obj_store->bend;\n\n    if (!repo_id || !is_uuid_valid(repo_id) ||\n        !obj_id || !is_object_id_valid(obj_id))\n        return;\n\n    bend->delete (bend, repo_id, version, obj_id);\n}\n\nint\nseaf_obj_store_foreach_obj (struct SeafObjStore *obj_store,\n                            const char *repo_id,\n                            int version,\n                            SeafObjFunc process,\n                            void *user_data)\n{\n    ObjBackend *bend = obj_store->bend;\n\n    return bend->foreach_obj (bend, repo_id, version, process, user_data);\n}\n\nint\nseaf_obj_store_copy_obj (struct SeafObjStore *obj_store,\n                         const char *src_repo_id,\n                         int src_version,\n                         const char *dst_repo_id,\n                         int dst_version,\n                         const char *obj_id)\n{\n    ObjBackend *bend = obj_store->bend;\n\n    if (strcmp (obj_id, EMPTY_SHA1) == 0)\n        return 0;\n\n    return bend->copy (bend, src_repo_id, src_version, dst_repo_id, dst_version, obj_id);\n}\n\nint\nseaf_obj_store_remove_store (struct SeafObjStore *obj_store,\n                             const char *store_id)\n{\n    ObjBackend *bend = obj_store->bend;\n\n    return bend->remove_store (bend, store_id);\n}\n"
  },
  {
    "path": "common/obj-store.h",
    "content": "#ifndef OBJ_STORE_H\n#define OBJ_STORE_H\n\n#include <glib.h>\n#include <sys/types.h>\n\nstruct _SeafileSession;\nstruct SeafObjStore;\n\nstruct SeafObjStore *\nseaf_obj_store_new (struct _SeafileSession *seaf, const char *obj_type);\n\nint\nseaf_obj_store_init (struct SeafObjStore *obj_store);\n\n/* Synchronous I/O interface. */\n\nint\nseaf_obj_store_read_obj (struct SeafObjStore *obj_store,\n                         const char *repo_id,\n                         int version,\n                         const char *obj_id,\n                         void **data,\n                         int *len);\n\nint\nseaf_obj_store_write_obj (struct SeafObjStore *obj_store,\n                          const char *repo_id,\n                          int version,\n                          const char *obj_id,\n                          void *data,\n                          int len,\n                          gboolean need_sync);\n\ngboolean\nseaf_obj_store_obj_exists (struct SeafObjStore *obj_store,\n                           const char *repo_id,\n                           int version,\n                           const char *obj_id);\n\nvoid\nseaf_obj_store_delete_obj (struct SeafObjStore *obj_store,\n                           const char *repo_id,\n                           int version,\n                           const char *obj_id);\n\ntypedef gboolean (*SeafObjFunc) (const char *repo_id,\n                                 int version,\n                                 const char *obj_id,\n                                 void *user_data);\n\nint\nseaf_obj_store_foreach_obj (struct SeafObjStore *obj_store,\n                            const char *repo_id,\n                            int version,\n                            SeafObjFunc process,\n                            void *user_data);\n\nint\nseaf_obj_store_copy_obj (struct SeafObjStore *obj_store,\n                         const char *src_store_id,\n                         int src_version,\n         
                const char *dst_store_id,\n                         int dst_version,\n                         const char *obj_id);\n\nint\nseaf_obj_store_remove_store (struct SeafObjStore *obj_store,\n                             const char *store_id);\n\n#endif\n"
  },
  {
    "path": "common/object-list.c",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#include \"common.h\"\n\n#include \"object-list.h\"\n\n\nObjectList *\nobject_list_new ()\n{\n    ObjectList *ol = g_new0 (ObjectList, 1);\n\n    ol->obj_hash = g_hash_table_new_full (g_str_hash, g_str_equal, NULL, NULL);\n    ol->obj_ids = g_ptr_array_new_with_free_func (g_free);\n\n    return ol;\n}\n\nvoid\nobject_list_free (ObjectList *ol)\n{\n    if (ol->obj_hash)\n        g_hash_table_destroy (ol->obj_hash);\n    g_ptr_array_free (ol->obj_ids, TRUE);\n    g_free (ol);\n}\n\nvoid\nobject_list_serialize (ObjectList *ol, uint8_t **buffer, uint32_t *len)\n{\n    uint32_t i;\n    uint32_t offset = 0;\n    uint8_t *buf;\n    int ollen = object_list_length(ol);\n\n    buf = g_new (uint8_t, 41 * ollen);\n    for (i = 0; i < ollen; ++i) {\n        memcpy (&buf[offset], g_ptr_array_index(ol->obj_ids, i), 41);\n        offset += 41;\n    }\n\n    *buffer = buf;\n    *len = 41 * ollen;\n}\n\ngboolean\nobject_list_insert (ObjectList *ol, const char *object_id)\n{\n    if (g_hash_table_lookup (ol->obj_hash, object_id))\n        return FALSE;\n    char *id = g_strdup(object_id);\n    g_hash_table_replace (ol->obj_hash, id, id);\n    g_ptr_array_add (ol->obj_ids, id);\n    return TRUE;\n}\n"
  },
  {
    "path": "common/object-list.h",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#ifndef OBJECT_LIST_H\n#define OBJECT_LIST_H\n\n#include <glib.h>\n\ntypedef struct {\n    GHashTable  *obj_hash;\n    GPtrArray   *obj_ids;\n} ObjectList;\n\n\nObjectList *\nobject_list_new ();\n\nvoid\nobject_list_free (ObjectList *ol);\n\nvoid\nobject_list_serialize (ObjectList *ol, uint8_t **buffer, uint32_t *len);\n\n/**\n * Add object to ObjectList.\n * Return FALSE if it is already in the list, TRUE otherwise. \n */\ngboolean\nobject_list_insert (ObjectList *ol, const char *object_id);\n\ninline static gboolean\nobject_list_exists (ObjectList *ol, const char *object_id)\n{\n    return (g_hash_table_lookup(ol->obj_hash, object_id) != NULL);\n}\n\ninline static int\nobject_list_length (ObjectList *ol)\n{\n    return ol->obj_ids->len;\n}\n\n#endif\n"
  },
  {
    "path": "common/org-mgr.c",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#include \"common.h\"\n\n#include \"seafile-session.h\"\n#include \"seaf-db.h\"\n#include \"org-mgr.h\"\n#include \"seaf-utils.h\"\n\n#include \"utils.h\"\n#include \"log.h\"\n\n#define DEFAULT_MAX_CONNECTIONS 100\n\nstruct _CcnetOrgManagerPriv\n{\n    CcnetDB\t*db;\n};\n\nstatic int open_db (CcnetOrgManager *manager);\nstatic int check_db_table (CcnetDB *db);\n\nCcnetOrgManager* ccnet_org_manager_new (SeafileSession *session)\n{\n    CcnetOrgManager *manager = g_new0 (CcnetOrgManager, 1);\n\n    manager->session = session;\n    manager->priv = g_new0 (CcnetOrgManagerPriv, 1);\n\n    return manager;\n}\n\nint\nccnet_org_manager_init (CcnetOrgManager *manager)\n{\n    return 0;\n}\n\nint\nccnet_org_manager_prepare (CcnetOrgManager *manager)\n{\n    return open_db (manager);\n}\n\nstatic CcnetDB *\nopen_sqlite_db (CcnetOrgManager *manager)\n{\n    CcnetDB *db = NULL;\n    char *db_dir;\n    char *db_path;\n\n    db_dir = g_build_filename (manager->session->ccnet_dir, \"OrgMgr\", NULL);\n    if (checkdir_with_mkdir(db_dir) < 0) {\n        ccnet_error (\"Cannot open db dir %s: %s\\n\", db_dir,\n                     strerror(errno));\n        g_free (db_dir);\n        return NULL;\n    }\n    g_free (db_dir);\n\n    db_path = g_build_filename (manager->session->ccnet_dir, \"OrgMgr\",\n                                \"orgmgr.db\", NULL);\n    db = seaf_db_new_sqlite (db_path, DEFAULT_MAX_CONNECTIONS);\n\n    g_free (db_path);\n\n    return db;\n}\n\nstatic int\nopen_db (CcnetOrgManager *manager)\n{\n    CcnetDB *db = NULL;\n\n    switch (seaf_db_type(manager->session->ccnet_db)) {\n    case SEAF_DB_TYPE_SQLITE:\n        db = open_sqlite_db (manager);\n        break;\n    case SEAF_DB_TYPE_PGSQL:\n    case SEAF_DB_TYPE_MYSQL:\n        db = manager->session->ccnet_db;\n        break;\n    }\n\n    if (!db)\n        return -1;\n    \n    manager->priv->db = db;\n    if 
((manager->session->create_tables || seaf_db_type(db) == SEAF_DB_TYPE_PGSQL)\n         && check_db_table (db) < 0) {\n        ccnet_warning (\"Failed to create org db tables.\\n\");\n        return -1;\n    }\n\n    return 0;\n}\n\nvoid ccnet_org_manager_start (CcnetOrgManager *manager)\n{\n}\n\n/* -------- Group Database Management ---------------- */\n\nstatic int check_db_table (CcnetDB *db)\n{\n    char *sql;\n\n    int db_type = seaf_db_type (db);\n    if (db_type == SEAF_DB_TYPE_MYSQL) {\n        sql = \"CREATE TABLE IF NOT EXISTS Organization (org_id BIGINT\"\n            \" PRIMARY KEY AUTO_INCREMENT, org_name VARCHAR(255),\"\n            \" url_prefix VARCHAR(255), creator VARCHAR(255), ctime BIGINT,\"\n            \" UNIQUE INDEX (url_prefix))\"\n            \"ENGINE=INNODB\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n        \n        sql = \"CREATE TABLE IF NOT EXISTS OrgUser ( \"\n            \"id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, org_id INTEGER, \"\n            \"email VARCHAR(255), is_staff BOOL NOT NULL, \"\n            \"INDEX (email), UNIQUE INDEX(org_id, email))\"\n            \"ENGINE=INNODB\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n        sql = \"CREATE TABLE IF NOT EXISTS OrgGroup (\"\n            \"id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, org_id INTEGER, \"\n            \"group_id INTEGER, INDEX (group_id), \"\n            \"UNIQUE INDEX(org_id, group_id))\"\n            \"ENGINE=INNODB\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n        \n    } else if (db_type == SEAF_DB_TYPE_SQLITE) {\n        sql = \"CREATE TABLE IF NOT EXISTS Organization (org_id INTEGER\"\n            \" PRIMARY KEY AUTOINCREMENT, org_name VARCHAR(255),\"\n            \" url_prefix VARCHAR(255), \"\n            \" creator VARCHAR(255), ctime BIGINT)\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n        sql = \"CREATE UNIQUE INDEX IF NOT EXISTS 
url_prefix_indx on \"\n            \"Organization (url_prefix)\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n        \n        sql = \"CREATE TABLE IF NOT EXISTS OrgUser (org_id INTEGER, \"\n            \"email TEXT, is_staff bool NOT NULL)\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n        sql = \"CREATE INDEX IF NOT EXISTS email_indx on \"\n            \"OrgUser (email)\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n        sql = \"CREATE UNIQUE INDEX IF NOT EXISTS orgid_email_indx on \"\n            \"OrgUser (org_id, email)\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n        sql = \"CREATE TABLE IF NOT EXISTS OrgGroup (org_id INTEGER, \"\n            \"group_id INTEGER)\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n        sql = \"CREATE INDEX IF NOT EXISTS groupid_indx on OrgGroup (group_id)\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n        sql = \"CREATE UNIQUE INDEX IF NOT EXISTS org_group_indx on \"\n            \"OrgGroup (org_id, group_id)\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n    } else if (db_type == SEAF_DB_TYPE_PGSQL) {\n        sql = \"CREATE TABLE IF NOT EXISTS Organization (org_id SERIAL\"\n            \" PRIMARY KEY, org_name VARCHAR(255),\"\n            \" url_prefix VARCHAR(255), creator VARCHAR(255), ctime BIGINT,\"\n            \" UNIQUE (url_prefix))\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n        \n        sql = \"CREATE TABLE IF NOT EXISTS OrgUser (org_id INTEGER, \"\n            \"email VARCHAR(255), is_staff INTEGER NOT NULL, \"\n            \"UNIQUE (org_id, email))\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n        //if (!pgsql_index_exists (db, \"orguser_email_idx\")) {\n        //    sql = \"CREATE INDEX orguser_email_idx ON OrgUser (email)\";\n        //    if (seaf_db_query (db, sql) 
< 0)\n        //        return -1;\n        //}\n\n        sql = \"CREATE TABLE IF NOT EXISTS OrgGroup (org_id INTEGER, \"\n            \"group_id INTEGER, \"\n            \"UNIQUE (org_id, group_id))\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n        //if (!pgsql_index_exists (db, \"orggroup_groupid_idx\")) {\n        //    sql = \"CREATE INDEX orggroup_groupid_idx ON OrgGroup (group_id)\";\n        //    if (seaf_db_query (db, sql) < 0)\n        //        return -1;\n        //}\n    }\n\n    return 0;\n}\n\nint ccnet_org_manager_create_org (CcnetOrgManager *mgr,\n                                  const char *org_name,\n                                  const char *url_prefix,\n                                  const char *creator,\n                                  GError **error)\n{\n    CcnetDB *db = mgr->priv->db;\n    gint64 now = get_current_time();\n    int rc;\n\n    rc = seaf_db_statement_query (db,\n                                   \"INSERT INTO Organization(org_name, url_prefix,\"\n                                   \" creator, ctime) VALUES (?, ?, ?, ?)\",\n                                   4, \"string\", org_name, \"string\", url_prefix,\n                                   \"string\", creator, \"int64\", now);\n    \n    if (rc < 0) {\n        g_set_error (error, CCNET_DOMAIN, 0, \"Failed to create organization\");\n        return -1;\n    }\n\n    int org_id = seaf_db_statement_get_int (db,\n                                             \"SELECT org_id FROM Organization WHERE \"\n                                             \"url_prefix = ?\", 1, \"string\", url_prefix);\n    if (org_id < 0) {\n        g_set_error (error, CCNET_DOMAIN, 0, \"Failed to create organization\");\n        return -1;\n    }\n\n    rc = seaf_db_statement_query (db, \"INSERT INTO OrgUser (org_id, email, is_staff) values (?, ?, ?)\",\n                                   3, \"int\", org_id, \"string\", creator, \"int\", 1);\n    if (rc < 0) {\n    
    seaf_db_statement_query (db, \"DELETE FROM Organization WHERE org_id=?\",\n                                  1, \"int\", org_id);\n        g_set_error (error, CCNET_DOMAIN, 0, \"Failed to create organization\");\n        return -1;\n    }\n    \n    return org_id;\n}\n\nint\nccnet_org_manager_remove_org (CcnetOrgManager *mgr,\n                              int org_id,\n                              GError **error)\n{\n    CcnetDB *db = mgr->priv->db;\n\n    seaf_db_statement_query (db, \"DELETE FROM Organization WHERE org_id = ?\",\n                              1, \"int\", org_id);\n\n    seaf_db_statement_query (db, \"DELETE FROM OrgUser WHERE org_id = ?\",\n                              1, \"int\", org_id);\n\n    seaf_db_statement_query (db, \"DELETE FROM OrgGroup WHERE org_id = ?\",\n                              1, \"int\", org_id);\n\n    return 0;\n}\n\n\nstatic gboolean\nget_all_orgs_cb (CcnetDBRow *row, void *data)\n{\n    GList **p_list = data;\n    CcnetOrganization *org = NULL;\n    int org_id;\n    const char *org_name;\n    const char *url_prefix;\n    const char *creator;\n    gint64 ctime;\n\n    org_id = seaf_db_row_get_column_int (row, 0);\n    org_name = seaf_db_row_get_column_text (row, 1);\n    url_prefix = seaf_db_row_get_column_text (row, 2);\n    creator = seaf_db_row_get_column_text (row, 3);\n    ctime = seaf_db_row_get_column_int64 (row, 4);\n\n    org = g_object_new (CCNET_TYPE_ORGANIZATION,\n                        \"org_id\", org_id,\n                        \"org_name\", org_name,\n                        \"url_prefix\", url_prefix,\n                        \"creator\", creator,\n                        \"ctime\", ctime,\n                        NULL);\n\n    *p_list = g_list_prepend (*p_list, org);\n\n    return TRUE;\n}\n\nGList *\nccnet_org_manager_get_all_orgs (CcnetOrgManager *mgr,\n                                int start,\n                                int limit)\n{\n    CcnetDB *db = mgr->priv->db;\n    char *sql;\n    
GList *ret = NULL;\n    int rc;\n\n    if (start == -1 && limit == -1) {\n        sql = \"SELECT * FROM Organization ORDER BY org_id\";\n        rc = seaf_db_statement_foreach_row (db, sql, get_all_orgs_cb, &ret, 0);\n    } else {\n        sql = \"SELECT * FROM Organization ORDER BY org_id LIMIT ? OFFSET ?\";\n        rc = seaf_db_statement_foreach_row (db, sql, get_all_orgs_cb, &ret,\n                                             2, \"int\", limit, \"int\", start);\n    }\n\n    if (rc < 0)\n        return NULL;\n\n    return g_list_reverse (ret);\n}\n\nint\nccnet_org_manager_count_orgs (CcnetOrgManager *mgr)\n{\n    CcnetDB *db = mgr->priv->db;\n    char *sql;\n    gint64 ret;\n\n    sql = \"SELECT count(*) FROM Organization\";\n\n    ret = seaf_db_get_int64 (db, sql);\n    if (ret < 0)\n        return -1;\n    return ret;\n}\n\nstatic gboolean\nget_org_cb (CcnetDBRow *row, void *data)\n{\n    CcnetOrganization **p_org = data;\n    int org_id;\n    const char *org_name;\n    const char *url_prefix;\n    const char *creator;\n    gint64 ctime;\n\n    org_id = seaf_db_row_get_column_int (row, 0);    \n    org_name = seaf_db_row_get_column_text (row, 1);\n    url_prefix = seaf_db_row_get_column_text (row, 2);\n    creator = seaf_db_row_get_column_text (row, 3);\n    ctime = seaf_db_row_get_column_int64 (row, 4);\n\n    *p_org = g_object_new (CCNET_TYPE_ORGANIZATION,\n                           \"org_id\", org_id,\n                           \"org_name\", org_name,\n                           \"url_prefix\", url_prefix,\n                           \"creator\", creator,\n                           \"ctime\", ctime,\n                           NULL);\n    return FALSE;\n}\n\nCcnetOrganization *\nccnet_org_manager_get_org_by_url_prefix (CcnetOrgManager *mgr,\n                                         const char *url_prefix,\n                                         GError **error)\n{\n    CcnetDB *db = mgr->priv->db;\n    char *sql;\n    CcnetOrganization *org = NULL;\n\n 
   sql = \"SELECT org_id, org_name, url_prefix, creator,\"\n        \" ctime FROM Organization WHERE url_prefix = ?\";\n\n    if (seaf_db_statement_foreach_row (db, sql, get_org_cb, &org,\n                                        1, \"string\", url_prefix) < 0) {\n        return NULL;\n    }\n\n    return org;\n}\n\nCcnetOrganization *\nccnet_org_manager_get_org_by_id (CcnetOrgManager *mgr,\n                                 int org_id,\n                                 GError **error)\n{\n    CcnetDB *db = mgr->priv->db;\n    char *sql;\n    CcnetOrganization *org = NULL;\n\n    sql = \"SELECT org_id, org_name, url_prefix, creator,\"\n        \" ctime FROM Organization WHERE org_id = ?\";\n\n    if (seaf_db_statement_foreach_row (db, sql, get_org_cb, &org,\n                                        1, \"int\", org_id) < 0) {\n        return NULL;\n    }\n\n    return org;\n}\n\nint\nccnet_org_manager_add_org_user (CcnetOrgManager *mgr,\n                                int org_id,\n                                const char *email,\n                                int is_staff,\n                                GError **error)\n{\n    CcnetDB *db = mgr->priv->db;\n\n    return seaf_db_statement_query (db, \"INSERT INTO OrgUser (org_id, email, is_staff) values (?, ?, ?)\",\n                                     3, \"int\", org_id, \"string\", email,\n                                     \"int\", is_staff);\n}\n\nint\nccnet_org_manager_remove_org_user (CcnetOrgManager *mgr,\n                                   int org_id,\n                                   const char *email,\n                                   GError **error)\n{\n    CcnetDB *db = mgr->priv->db;\n\n    return seaf_db_statement_query (db, \"DELETE FROM OrgUser WHERE org_id=? 
AND \"\n                                     \"email=?\", 2, \"int\", org_id, \"string\", email);\n}\n\nstatic gboolean\nget_orgs_by_user_cb (CcnetDBRow *row, void *data)\n{\n    GList **p_list = (GList **)data;\n    CcnetOrganization *org = NULL;\n    int org_id;\n    const char *email;\n    int is_staff;\n    const char *org_name;\n    const char *url_prefix;\n    const char *creator;\n    gint64 ctime;\n\n    org_id = seaf_db_row_get_column_int (row, 0);\n    email = (char *) seaf_db_row_get_column_text (row, 1);\n    is_staff = seaf_db_row_get_column_int (row, 2);\n    org_name = (char *) seaf_db_row_get_column_text (row, 3);\n    url_prefix = (char *) seaf_db_row_get_column_text (row, 4);\n    creator = (char *) seaf_db_row_get_column_text (row, 5);\n    ctime = seaf_db_row_get_column_int64 (row, 6);\n    \n    org = g_object_new (CCNET_TYPE_ORGANIZATION,\n                        \"org_id\", org_id,\n                        \"email\", email,\n                        \"is_staff\", is_staff,\n                        \"org_name\", org_name,\n                        \"url_prefix\", url_prefix,\n                        \"creator\", creator,\n                        \"ctime\", ctime,\n                        NULL);\n    *p_list = g_list_prepend (*p_list, org);\n        \n    return TRUE;\n}\n\nGList *\nccnet_org_manager_get_orgs_by_user (CcnetOrgManager *mgr,\n                                   const char *email,\n                                   GError **error)\n{\n    CcnetDB *db = mgr->priv->db;\n    char *sql;\n    GList *ret = NULL;\n\n    sql = \"SELECT t1.org_id, email, is_staff, org_name,\"\n        \" url_prefix, creator, ctime FROM OrgUser t1, Organization t2\"\n        \" WHERE t1.org_id = t2.org_id AND email = ?\";\n\n    if (seaf_db_statement_foreach_row (db, sql, get_orgs_by_user_cb, &ret,\n                                        1, \"string\", email) < 0) {\n        g_list_free (ret);\n        return NULL;\n    }\n\n    return g_list_reverse 
(ret);\n}\n\nstatic gboolean\nget_org_emailusers (CcnetDBRow *row, void *data)\n{\n    GList **list = (GList **)data;\n    const char *email = (char *) seaf_db_row_get_column_text (row, 0);\n\n    *list = g_list_prepend (*list, g_strdup (email));\n    return TRUE;\n}\n\nGList *\nccnet_org_manager_get_org_emailusers (CcnetOrgManager *mgr,\n                                      const char *url_prefix,\n                                      int start, int limit)\n{\n    CcnetDB *db = mgr->priv->db;\n    char *sql;\n    GList *ret = NULL;\n    int rc;\n\n    if (start == -1 && limit == -1) {\n        sql = \"SELECT u.email FROM OrgUser u, Organization o \"\n            \"WHERE u.org_id = o.org_id AND \"\n            \"o.url_prefix = ? \"\n            \"ORDER BY email\";\n        rc = seaf_db_statement_foreach_row (db, sql, get_org_emailusers, &ret,\n                                             1, \"string\", url_prefix);\n    } else {\n        sql = \"SELECT u.email FROM OrgUser u, Organization o \"\n            \"WHERE u.org_id = o.org_id AND \"\n            \"o.url_prefix = ? \"\n            \" ORDER BY email LIMIT ? 
OFFSET ?\";\n        rc = seaf_db_statement_foreach_row (db, sql, get_org_emailusers, &ret,\n                                             3, \"string\", url_prefix,\n                                             \"int\", limit, \"int\", start);\n    }\n\n    if (rc < 0)\n        return NULL;\n\n    return g_list_reverse (ret);\n}\n\nint\nccnet_org_manager_add_org_group (CcnetOrgManager *mgr,\n                                 int org_id,\n                                 int group_id,\n                                 GError **error)\n{\n    CcnetDB *db = mgr->priv->db;\n\n    return seaf_db_statement_query (db, \"INSERT INTO OrgGroup (org_id, group_id) VALUES (?, ?)\",\n                                     2, \"int\", org_id, \"int\", group_id);\n}\n\nint\nccnet_org_manager_remove_org_group (CcnetOrgManager *mgr,\n                                    int org_id,\n                                    int group_id,\n                                    GError **error)\n{\n    CcnetDB *db = mgr->priv->db;\n\n    return seaf_db_statement_query (db, \"DELETE FROM OrgGroup WHERE org_id=?\"\n                                     \" AND group_id=?\",\n                                     2, \"int\", org_id, \"int\", group_id);\n}\n\nint\nccnet_org_manager_is_org_group (CcnetOrgManager *mgr,\n                                int group_id,\n                                GError **error)\n{\n    gboolean exists, err;\n\n    CcnetDB *db = mgr->priv->db;\n\n    exists = seaf_db_statement_exists (db, \"SELECT group_id FROM OrgGroup \"\n                                        \"WHERE group_id = ?\", &err, 1, \"int\", group_id);\n    if (err) {\n        ccnet_warning (\"DB error when check group exist in OrgGroup.\\n\");\n        return 0;\n    }\n    return exists;\n}\n\nint\nccnet_org_manager_get_org_id_by_group (CcnetOrgManager *mgr,\n                                       int group_id,\n                                       GError **error)\n{\n    CcnetDB *db = mgr->priv->db;\n 
   char *sql;\n\n    sql = \"SELECT org_id FROM OrgGroup WHERE group_id = ?\";\n    return seaf_db_statement_get_int (db, sql, 1, \"int\", group_id);\n}\n\nstatic gboolean\nget_org_group_ids (CcnetDBRow *row, void *data)\n{\n    GList **plist = data;\n\n    int group_id = seaf_db_row_get_column_int (row, 0);\n\n    *plist = g_list_prepend (*plist, (gpointer)(long)group_id);\n\n    return TRUE;\n}\n\nGList *\nccnet_org_manager_get_org_group_ids (CcnetOrgManager *mgr,\n                                     int org_id,\n                                     int start,\n                                     int limit)\n{\n    CcnetDB *db = mgr->priv->db;\n    GList *ret = NULL;\n    int rc;\n\n    if (limit == -1) {\n        rc = seaf_db_statement_foreach_row (db,\n                                             \"SELECT group_id FROM OrgGroup WHERE \"\n                                             \"org_id = ?\",\n                                             get_org_group_ids, &ret,\n                                             1, \"int\", org_id);\n    } else {\n        rc = seaf_db_statement_foreach_row (db,\n                                             \"SELECT group_id FROM OrgGroup WHERE \"\n                                             \"org_id = ? LIMIT ? 
OFFSET ?\",\n                                             get_org_group_ids, &ret,\n                                             3, \"int\", org_id, \"int\", limit,\n                                             \"int\", start);\n    }\n    \n    if (rc < 0) {\n        g_list_free (ret);\n        return NULL;\n    }\n\n    return g_list_reverse (ret);\n}\n\nstatic gboolean\nget_org_groups (CcnetDBRow *row, void *data)\n{\n    GList **plist = data;\n    CcnetGroup *group;\n\n    int group_id = seaf_db_row_get_column_int (row, 0);\n    const char *group_name = seaf_db_row_get_column_text (row, 1);\n    const char *creator_name = seaf_db_row_get_column_text (row, 2);\n    gint64 ts = seaf_db_row_get_column_int64 (row, 3);\n    int parent_group_id = seaf_db_row_get_column_int (row, 4);\n\n    group = g_object_new (CCNET_TYPE_GROUP,\n                          \"id\", group_id,\n                          \"group_name\", group_name,\n                          \"creator_name\", creator_name,\n                          \"timestamp\", ts,\n                          \"source\", \"DB\",\n                          \"parent_group_id\", parent_group_id,\n                          NULL);\n\n    *plist = g_list_prepend (*plist, group);\n\n    return TRUE;\n}\n\nGList *\nccnet_org_manager_get_org_top_groups (CcnetOrgManager *mgr, int org_id, GError **error)\n{\n    CcnetDB *db = mgr->priv->db;\n    GList *ret = NULL;\n    char *sql;\n    int rc;\n\n    sql = \"SELECT g.group_id, group_name, creator_name, timestamp, parent_group_id FROM \"\n          \"`OrgGroup` o, `Group` g WHERE o.group_id = g.group_id AND \"\n          \"org_id=? 
AND parent_group_id=-1 ORDER BY timestamp DESC\";\n\n    rc = seaf_db_statement_foreach_row (db, sql,\n                                         get_org_groups, &ret,\n                                         1, \"int\", org_id);\n    if (rc < 0)\n        return NULL;\n\n    return g_list_reverse (ret);\n}\n\nGList *\nccnet_org_manager_get_org_groups (CcnetOrgManager *mgr,\n                                  int org_id,\n                                  int start,\n                                  int limit)\n{\n    CcnetDB *db = mgr->priv->db;\n    char *sql;\n    GList *ret = NULL;\n    int rc;\n\n    if (limit == -1) {\n        sql = \"SELECT g.group_id, group_name, creator_name, timestamp, parent_group_id FROM \"\n            \"OrgGroup o, `Group` g WHERE o.group_id = g.group_id AND org_id = ?\";\n        rc = seaf_db_statement_foreach_row (db,\n                                             sql,\n                                             get_org_groups, &ret,\n                                             1, \"int\", org_id);\n    } else {\n        sql = \"SELECT g.group_id, group_name, creator_name, timestamp, parent_group_id FROM \"\n            \"OrgGroup o, `Group` g WHERE o.group_id = g.group_id AND org_id = ? \"\n            \"LIMIT ? 
OFFSET ?\";\n        rc = seaf_db_statement_foreach_row (db,\n                                             sql,\n                                             get_org_groups, &ret,\n                                             3, \"int\", org_id, \"int\", limit,\n                                             \"int\", start);\n    }\n    \n    if (rc < 0) {\n        return NULL;\n    }\n\n    return g_list_reverse (ret);\n}\n\nGList *\nccnet_org_manager_get_org_groups_by_user (CcnetOrgManager *mgr,\n                                          const char *user,\n                                          int org_id)\n{\n    CcnetDB *db = mgr->priv->db;\n    char *sql;\n    GList *ret = NULL;\n    int rc;\n\n    sql = \"SELECT g.group_id, group_name, creator_name, timestamp, parent_group_id FROM \"\n          \"OrgGroup o, `Group` g, GroupUser u \"\n          \"WHERE o.group_id = g.group_id AND org_id = ? AND \"\n          \"g.group_id = u.group_id AND user_name = ?\";\n    rc = seaf_db_statement_foreach_row (db,\n                                         sql,\n                                         get_org_groups, &ret,\n                                         2, \"int\", org_id, \"string\", user);\n    if (rc < 0)\n        return NULL;\n\n    return g_list_reverse (ret);\n}\n\nint\nccnet_org_manager_org_user_exists (CcnetOrgManager *mgr,\n                                   int org_id,\n                                   const char *email,\n                                   GError **error)\n{\n    gboolean exists, err;\n\n    CcnetDB *db = mgr->priv->db;\n\n    exists = seaf_db_statement_exists (db, \"SELECT org_id FROM OrgUser WHERE \"\n                                        \"org_id = ? 
AND email = ?\", &err,\n                                        2, \"int\", org_id, \"string\", email);\n    if (err) {\n        ccnet_warning (\"DB error when check user exist in OrgUser.\\n\");\n        return 0;\n    }\n    return exists;\n}\n\nchar *\nccnet_org_manager_get_url_prefix_by_org_id (CcnetOrgManager *mgr,\n                                            int org_id,\n                                            GError **error)\n{\n    CcnetDB *db = mgr->priv->db;\n    char *sql;\n\n    sql = \"SELECT url_prefix FROM Organization WHERE org_id = ?\";\n\n    return seaf_db_statement_get_string (db, sql, 1, \"int\", org_id);\n}\n\nint\nccnet_org_manager_is_org_staff (CcnetOrgManager *mgr,\n                                int org_id,\n                                const char *email,\n                                GError **error)\n{\n    CcnetDB *db = mgr->priv->db;\n    char *sql;\n\n    sql = \"SELECT is_staff FROM OrgUser WHERE org_id=? AND email=?\";\n\n    return seaf_db_statement_get_int (db, sql, 2, \"int\", org_id, \"string\", email);\n}\n\nint\nccnet_org_manager_set_org_staff (CcnetOrgManager *mgr,\n                                 int org_id,\n                                 const char *email,\n                                 GError **error)\n{\n    CcnetDB *db = mgr->priv->db;\n\n    return seaf_db_statement_query (db, \"UPDATE OrgUser SET is_staff = 1 \"\n                                     \"WHERE org_id=? AND email=?\", 2,\n                                     \"int\", org_id, \"string\", email);\n}\n\nint\nccnet_org_manager_unset_org_staff (CcnetOrgManager *mgr,\n                                   int org_id,\n                                   const char *email,\n                                   GError **error)\n{\n    CcnetDB *db = mgr->priv->db;\n\n    return seaf_db_statement_query (db, \"UPDATE OrgUser SET is_staff = 0 \"\n                                     \"WHERE org_id=? 
AND email=?\", 2,\n                                     \"int\", org_id, \"string\", email);\n}\n\nint\nccnet_org_manager_set_org_name(CcnetOrgManager *mgr,\n                               int org_id,\n                               const char *org_name,\n                               GError **error)\n{\n    CcnetDB *db = mgr->priv->db;\n\n    return seaf_db_statement_query (db,\n                                     \"UPDATE `Organization` set org_name = ? \"\n                                     \"WHERE org_id = ?\",\n                                     2, \"string\", org_name, \"int\", org_id);\n}\n\n"
  },
  {
    "path": "common/org-mgr.h",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#ifndef _ORG_MGR_H_\n#define _ORG_MGR_H_\n\ntypedef struct _SeafileSession SeafileSession;\ntypedef struct _CcnetOrgManager CcnetOrgManager;\ntypedef struct _CcnetOrgManagerPriv CcnetOrgManagerPriv;\n\nstruct _CcnetOrgManager\n{\n    SeafileSession\t*session;\n\n    CcnetOrgManagerPriv\t*priv;\n};\n\nCcnetOrgManager* ccnet_org_manager_new (SeafileSession *session);\n\nint\nccnet_org_manager_prepare (CcnetOrgManager *manager);\n\nvoid\nccnet_org_manager_start (CcnetOrgManager *manager);\n\nint\nccnet_org_manager_create_org (CcnetOrgManager *mgr,\n                              const char *org_name,\n                              const char *url_prefix,\n                              const char *creator,\n                              GError **error);\n\nint\nccnet_org_manager_remove_org (CcnetOrgManager *mgr,\n                              int org_id,\n                              GError **error);\n\nGList *\nccnet_org_manager_get_all_orgs (CcnetOrgManager *mgr,\n                                int start,\n                                int limit);\n\nint\nccnet_org_manager_count_orgs (CcnetOrgManager *mgr);\n\nCcnetOrganization *\nccnet_org_manager_get_org_by_url_prefix (CcnetOrgManager *mgr,\n                                         const char *url_prefix,\n                                         GError **error);\n\nCcnetOrganization *\nccnet_org_manager_get_org_by_id (CcnetOrgManager *mgr,\n                                 int org_id,\n                                 GError **error);\n\nint\nccnet_org_manager_add_org_user (CcnetOrgManager *mgr,\n                                int org_id,\n                                const char *email,\n                                int is_staff,\n                                GError **error);\n\nint\nccnet_org_manager_remove_org_user (CcnetOrgManager *mgr,\n                                   int org_id,\n                   
                const char *email,\n                                   GError **error);\n\nGList *\nccnet_org_manager_get_orgs_by_user (CcnetOrgManager *mgr,\n                                   const char *email,\n                                   GError **error);\n\nGList *\nccnet_org_manager_get_org_emailusers (CcnetOrgManager *mgr,\n                                      const char *url_prefix,\n                                      int start, int limit);\n\nint\nccnet_org_manager_add_org_group (CcnetOrgManager *mgr,\n                                 int org_id,\n                                 int group_id,\n                                 GError **error);\nint\nccnet_org_manager_remove_org_group (CcnetOrgManager *mgr,\n                                    int org_id,\n                                    int group_id,\n                                    GError **error);\n\nint\nccnet_org_manager_is_org_group (CcnetOrgManager *mgr,\n                                int group_id,\n                                GError **error);\n\nint\nccnet_org_manager_get_org_id_by_group (CcnetOrgManager *mgr,\n                                       int group_id,\n                                       GError **error);\n\nGList *\nccnet_org_manager_get_org_group_ids (CcnetOrgManager *mgr,\n                                     int org_id,\n                                     int start,\n                                     int limit);\n\nGList *\nccnet_org_manager_get_org_groups (CcnetOrgManager *mgr,\n                                  int org_id,\n                                  int start,\n                                  int limit);\n\nGList *\nccnet_org_manager_get_org_groups_by_user (CcnetOrgManager *mgr,\n                                          const char *user,\n                                          int org_id);\n\nGList *\nccnet_org_manager_get_org_top_groups (CcnetOrgManager *mgr, int org_id, GError **error);\n\nint\nccnet_org_manager_org_user_exists 
(CcnetOrgManager *mgr,\n                                   int org_id,\n                                   const char *email,\n                                   GError **error);\n\nchar *\nccnet_org_manager_get_url_prefix_by_org_id (CcnetOrgManager *mgr,\n                                            int org_id,\n                                            GError **error);\n\nint\nccnet_org_manager_is_org_staff (CcnetOrgManager *mgr,\n                                int org_id,\n                                const char *email,\n                                GError **error);\n\nint\nccnet_org_manager_set_org_staff (CcnetOrgManager *mgr,\n                                 int org_id,\n                                 const char *email,\n                                 GError **error);\n\nint\nccnet_org_manager_unset_org_staff (CcnetOrgManager *mgr,\n                                   int org_id,\n                                   const char *email,\n                                   GError **error);\n\nint\nccnet_org_manager_set_org_name(CcnetOrgManager *mgr,\n                               int org_id,\n                               const char *org_name,\n                               GError **error);\n\n\n#endif /* _ORG_MGR_H_ */\n"
  },
  {
    "path": "common/password-hash.c",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#include <string.h>\n#include <glib.h>\n#include <argon2.h>\n#include \"password-hash.h\"\n#include \"seafile-crypt.h\"\n#include <openssl/rand.h>\n\n#include \"utils.h\"\n#include \"log.h\"\n\n// pbkdf2\ntypedef struct Pbkdf2Params {\n    int iteration;\n} Pbkdf2Params;\n\nstatic Pbkdf2Params *\nparse_pbkdf2_sha256_params (const char *params_str)\n{\n    Pbkdf2Params *params = NULL;\n    if (!params_str) {\n        params = g_new0 (Pbkdf2Params, 1);\n        params->iteration = 1000;\n        return params;\n    }\n    int iteration;\n    iteration = atoi (params_str);\n    if (iteration <= 0) {\n        iteration = 1000;\n    }\n\n    params = g_new0 (Pbkdf2Params, 1);\n    params->iteration = iteration;\n    return params;\n}\n\nstatic int\npbkdf2_sha256_derive_key (const char *data_in, int in_len,\n                          const char *salt,\n                          Pbkdf2Params *params,\n                          unsigned char *key)\n{\n    int iteration = params->iteration;\n\n    unsigned char salt_bin[32] = {0};\n    hex_to_rawdata (salt, salt_bin, 32);\n\n    PKCS5_PBKDF2_HMAC (data_in, in_len,\n                       salt_bin, sizeof(salt_bin),\n                       iteration,\n                       EVP_sha256(),\n                       32, key);\n    return 0;\n}\n\n// argon2id\ntypedef struct Argon2idParams{\n    gint64 time_cost; \n    gint64 memory_cost;\n    gint64 parallelism;\n} Argon2idParams;\n\n// The arguments to argon2 are separated by commas.\n// Example arguments format:\n// 2,102400,8\n// The parameters are time_cost, memory_cost, parallelism from left to right.\nstatic Argon2idParams *\nparse_argon2id_params (const char *params_str)\n{\n    char **params;\n    Argon2idParams *argon2_params = g_new0 (Argon2idParams, 1);\n    if (params_str)\n        params = g_strsplit (params_str, \",\", 3);\n    if (!params_str || g_strv_length(params) != 
3) {\n        if (params_str)\n            g_strfreev (params);\n        argon2_params->time_cost = 2; // 2-pass computation\n        argon2_params->memory_cost = 102400; // 100 mebibytes memory usage\n        argon2_params->parallelism = 8; // number of threads and lanes\n        return argon2_params;\n    }\n\n    char *p = NULL;\n    p = g_strstrip (params[0]);\n    argon2_params->time_cost = atoll (p);\n    if (argon2_params->time_cost <= 0) {\n        argon2_params->time_cost = 2;\n    }\n\n    p = g_strstrip (params[1]);\n    argon2_params->memory_cost = atoll (p);\n    if (argon2_params->memory_cost <= 0) {\n        argon2_params->memory_cost = 102400;\n    }\n\n    p = g_strstrip (params[2]);\n    argon2_params->parallelism = atoll (p);\n    if (argon2_params->parallelism <= 0) {\n        argon2_params->parallelism = 8;\n    }\n\n    g_strfreev (params);\n    return argon2_params;\n}\n\nstatic int\nargon2id_derive_key (const char *data_in, int in_len,\n                     const char *salt,\n                     Argon2idParams *params,\n                     unsigned char *key)\n{\n    unsigned char salt_bin[32] = {0};\n    hex_to_rawdata (salt, salt_bin, 32);\n\n    argon2id_hash_raw(params->time_cost, params->memory_cost, params->parallelism,\n                      data_in, in_len,\n                      salt_bin, sizeof(salt_bin),\n                      key, 32);\n\n    return 0;\n}\n\n// parse_pwd_hash_params is used to parse default pwd hash algorithms.\nvoid\nparse_pwd_hash_params (const char *algo, const char *params_str, PwdHashParams *params)\n{\n    if (g_strcmp0 (algo, PWD_HASH_PDKDF2) == 0) {\n        params->algo = g_strdup (PWD_HASH_PDKDF2);\n        if (params_str)\n            params->params_str = g_strdup (params_str);\n        else\n            params->params_str = g_strdup (\"1000\");\n    } else if (g_strcmp0 (algo, PWD_HASH_ARGON2ID) == 0) {\n        params->algo = g_strdup (PWD_HASH_ARGON2ID);\n        if (params_str)\n            
params->params_str = g_strdup (params_str);\n        else\n            params->params_str = g_strdup (\"2,102400,8\");\n    } else {\n        params->algo = NULL;\n    }\n\n    seaf_message (\"password hash algorithms: %s, params: %s\\n \", params->algo, params->params_str);\n}\n\nint\npwd_hash_derive_key (const char *data_in, int in_len,\n                     const char *salt,\n                     const char *algo, const char *params_str,\n                     unsigned char *key)\n{\n    int ret = 0;\n    if (g_strcmp0 (algo, PWD_HASH_ARGON2ID) == 0) {\n        Argon2idParams *algo_params = parse_argon2id_params (params_str);\n        ret = argon2id_derive_key (data_in, in_len,\n                                   salt, algo_params, key);\n        g_free (algo_params);\n        return ret;\n    } else {\n        Pbkdf2Params *algo_params = parse_pbkdf2_sha256_params (params_str);\n        ret = pbkdf2_sha256_derive_key (data_in, in_len,\n                                        salt, algo_params, key);\n        g_free (algo_params);\n        return ret;\n    }\n}\n"
  },
  {
    "path": "common/password-hash.h",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#ifndef _PASSWORD_HASH_H\n#define _PASSWORD_HASH_H\n\n#define PWD_HASH_PDKDF2 \"pbkdf2_sha256\"\n#define PWD_HASH_ARGON2ID \"argon2id\"\n\ntypedef struct _PwdHashParams {\n    char *algo;\n    char *params_str;\n} PwdHashParams;\n\nvoid\n parse_pwd_hash_params (const char *algo, const char *params_str, PwdHashParams *params);\n\nint\npwd_hash_derive_key (const char *data_in, int in_len,\n                     const char *repo_salt,\n                     const char *algo, const char *params_str,\n                     unsigned char *key);\n\n#endif\n"
  },
  {
    "path": "common/processors/objecttx-common.h",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#ifndef OBJECTTX_COMMON_H\n#define OBJECTTX_COMMON_H\n\n#define SC_GET_OBJECT   \"301\"\n#define SS_GET_OBJECT   \"Get Object\"\n#define SC_OBJECT       \"302\"\n#define SS_OBJECT       \"Object\"\n#define SC_END          \"303\"\n#define SS_END          \"END\"\n#define SC_COMMIT_IDS   \"304\"\n#define SS_COMMIT_IDS   \"Commit IDs\"\n#define SC_ACK          \"305\"\n#define SS_ACK          \"Ack\"\n\n#define SC_OBJ_SEG      \"306\"\n#define SS_OBJ_SEG      \"Object Segment\"\n#define SC_OBJ_SEG_END  \"307\"\n#define SS_OBJ_SEG_END  \"Object Segment End\"\n\n#define SC_OBJ_LIST_SEG \"308\"\n#define SS_OBJ_LIST_SEG \"Object List Segment\"\n#define SC_OBJ_LIST_SEG_END \"309\"\n#define SS_OBJ_LIST_SEG_END \"Object List Segment End\"\n\n#define SC_NOT_FOUND    \"401\"\n#define SS_NOT_FOUND    \"Object not found\"\n#define SC_BAD_OL       \"402\"\n#define SS_BAD_OL       \"Bad Object List\"\n#define SC_BAD_OBJECT   \"403\"\n#define SS_BAD_OBJECT   \"Bad Object\"\n\n#define SC_ACCESS_DENIED \"410\"\n#define SS_ACCESS_DENIED \"Access denied\"\n\n/* for fs transfer */\n#define SC_ROOT         \"304\"\n#define SS_ROOT         \"FS Root\"\n#define SC_ROOT_END     \"305\"\n#define SS_ROOT_END     \"FS Root End\"\n\n/* max fs object segment size */\n#define MAX_OBJ_SEG_SIZE 64000\n\n\ntypedef struct {\n    char    id[41];\n    uint8_t object[0];\n} __attribute__((__packed__)) ObjectPack;\n\n#endif\n"
  },
  {
    "path": "common/redis-cache.c",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#include \"common.h\"\n\n#include <hiredis.h>\n#include \"redis-cache.h\"\n\n#define DEBUG_FLAG SEAFILE_DEBUG_OTHER\n#include \"log.h\"\n\nstruct _RedisConnectionPool {\n    char *host;\n    int port;\n    GPtrArray *connections;\n    pthread_mutex_t lock;\n    int max_connections;\n};\ntypedef struct _RedisConnectionPool RedisConnectionPool;\n\nstruct _RedisConnection {\n    gboolean is_available;\n    redisContext *ac;\n    gint64 ctime;               /* Used to clean up unused connection. */\n    gboolean release;           /* If TRUE, the connection will be released. */\n};\ntypedef struct _RedisConnection RedisConnection;\n\ntypedef struct RedisPriv {\n    RedisConnectionPool *redis_pool;\n    char *passwd;\n} RedisPriv;\n\nstatic int\nredis_auth (RedisConnection *conn, const char *passwd)\n{\n    redisReply *reply;\n    int ret = 0;\n\n    if (!passwd) {\n        return 0;\n    }\n\n    reply = redisCommand(conn->ac, \"AUTH %s\", passwd);\n    if (!reply) {\n        seaf_warning (\"Failed to auth redis server.\\n\");\n        ret = -1;\n        goto out;\n    }\n\n    if (reply->type != REDIS_REPLY_STATUS ||\n        g_strcmp0 (reply->str, \"OK\") != 0) {\n        if (reply->type == REDIS_REPLY_ERROR) {\n            seaf_warning (\"Failed to auth redis: %s.\\n\", reply->str);\n        }\n        ret = -1;\n        goto out;\n    }\n\nout:\n    freeReplyObject (reply);\n    return ret;\n}\n\n\nstatic RedisConnection *\nredis_connection_new (const char *host, const char *passwd, int port)\n{\n    RedisConnection *conn = g_new0 (RedisConnection, 1);\n\n    conn->ac = redisConnect(host, port);\n    if (!conn->ac || conn->ac->err) {\n        if (conn->ac) {\n            seaf_warning (\"Failed to connect to redis : %s\\n\", conn->ac->errstr);\n            redisFree (conn->ac);\n        } else {\n            seaf_warning (\"Can't allocate redis context\\n\");\n        
}\n        g_free (conn);\n        return NULL;\n    }\n\n    if (redis_auth (conn, passwd) < 0) {\n        redisFree (conn->ac);\n        g_free (conn);\n        return NULL;\n    }\n    conn->ctime = (gint64)time(NULL);\n\n    return conn;\n}\n\nstatic void\nredis_connection_free (RedisConnection *conn)\n{\n    if (!conn)\n        return;\n\n    if (conn->ac)\n        redisFree(conn->ac);\n\n    g_free (conn);\n}\n\nstatic RedisConnectionPool *\nredis_connection_pool_new (const char *host, int port, int max_connections)\n{\n    RedisConnectionPool *pool = g_new0 (RedisConnectionPool, 1);\n    pool->host = g_strdup(host);\n    pool->port = port;\n    pool->connections = g_ptr_array_sized_new (max_connections);\n    pool->max_connections = max_connections;\n    pthread_mutex_init (&pool->lock, NULL);\n    return pool;\n}\n\nstatic RedisConnection *\nredis_connection_pool_get_connection (RedisConnectionPool *pool, const char *passwd)\n{\n    RedisConnection *conn = NULL;\n\n    if (pool->max_connections == 0) {\n        conn = redis_connection_new (pool->host, passwd, pool->port);\n        return conn;\n    }\n\n    pthread_mutex_lock (&pool->lock);\n\n    guint i, size = pool->connections->len;\n    for (i = 0; i < size; ++i) {\n        conn = g_ptr_array_index (pool->connections, i);\n        if (!conn->is_available) {\n            continue;\n        }\n        conn->is_available = FALSE;\n        goto out;\n    }\n    conn = NULL;\n    if (size < pool->max_connections) {\n        conn = redis_connection_new (pool->host, passwd, pool->port);\n        if (conn) {\n            conn->is_available = FALSE;\n            g_ptr_array_add (pool->connections, conn);\n        }\n    } else {\n        seaf_warning (\"The number of redis connections exceeds the limit. 
The maximum connections is %d.\\n\", pool->max_connections);\n    }\n\nout:\n    pthread_mutex_unlock (&pool->lock);\n    return conn;\n}\n\nstatic void\nredis_connection_pool_return_connection (RedisConnectionPool *pool, RedisConnection *conn)\n{\n    if (!conn)\n        return;\n\n    if (pool->max_connections == 0) {\n        redis_connection_free (conn);\n        return;\n    }\n\n    if (conn->release) {\n        pthread_mutex_lock (&pool->lock);\n        g_ptr_array_remove (pool->connections, conn);\n        pthread_mutex_unlock (&pool->lock);\n        redis_connection_free (conn);\n        return;\n    }\n\n    pthread_mutex_lock (&pool->lock);\n    conn->is_available = TRUE;\n    pthread_mutex_unlock (&pool->lock);\n}\n\nvoid *\nredis_cache_get_object (ObjCache *cache, const char *obj_id, size_t *len)\n{\n    RedisConnection *conn;\n    char *object = NULL;\n    redisReply *reply;\n    RedisPriv *priv = cache->priv;\n    RedisConnectionPool *pool = priv->redis_pool;\n\n    conn = redis_connection_pool_get_connection (pool, priv->passwd);\n    if (!conn) {\n        seaf_warning (\"Failed to get redis connection to host %s.\\n\", cache->host);\n        return NULL;\n    }\n\n    reply = redisCommand(conn->ac, \"GET %s\", obj_id);\n    if (!reply) {\n        seaf_warning (\"Failed to get object %s from redis cache.\\n\", obj_id);\n        conn->release = TRUE;\n        goto out;\n    }\n    if (reply->type != REDIS_REPLY_STRING) {\n        if (reply->type == REDIS_REPLY_ERROR) {\n            conn->release = TRUE;\n            seaf_warning (\"Failed to get %s from redis cache: %s.\\n\",\n                      obj_id, reply->str);\n        }\n        goto out;\n    }\n\n    *len = reply->len;\n    object = g_memdup (reply->str, reply->len);\n\nout:\n    freeReplyObject(reply);\n    redis_connection_pool_return_connection (pool, conn);\n\n    return object;\n}\n\nint\nredis_cache_set_object (ObjCache *cache,\n                        const char *obj_id,\n          
              const void *object,\n                        int len,\n                        int expiry)\n{\n    RedisConnection *conn;\n    redisReply *reply;\n    int ret = 0;\n    RedisPriv *priv = cache->priv;\n    RedisConnectionPool *pool = priv->redis_pool;\n\n    conn = redis_connection_pool_get_connection (pool, priv->passwd);\n    if (!conn) {\n        seaf_warning (\"Failed to get redis connection to host %s.\\n\", cache->host);\n        return -1;\n    }\n\n    if (expiry <= 0)\n        expiry = cache->mc_expiry;\n    reply = redisCommand(conn->ac, \"SET %s %b EX %d\", obj_id, object, len, expiry);\n    if (!reply) {\n        seaf_warning (\"Failed to set object %s to redis cache.\\n\", obj_id);\n        ret = -1;\n        conn->release = TRUE;\n        goto out;\n    }\n    if (reply->type != REDIS_REPLY_STATUS ||\n        g_strcmp0 (reply->str, \"OK\") != 0) {\n        if (reply->type == REDIS_REPLY_ERROR) {\n            conn->release = TRUE;\n            seaf_warning (\"Failed to set %s to redis: %s.\\n\",\n                          obj_id, reply->str);\n        }\n        ret = -1;\n    }\n\nout:\n    freeReplyObject(reply);\n    redis_connection_pool_return_connection (pool, conn);\n\n    return ret;\n}\n\ngboolean\nredis_cache_test_object (ObjCache *cache, const char *obj_id)\n{\n    RedisConnection *conn;\n    redisReply *reply;\n    gboolean ret = FALSE;\n    RedisPriv *priv = cache->priv;\n    RedisConnectionPool *pool = priv->redis_pool;\n\n    conn = redis_connection_pool_get_connection (pool, priv->passwd);\n    if (!conn) {\n        seaf_warning (\"Failed to get redis connection to host %s.\\n\", cache->host);\n        return ret;\n    }\n\n    reply = redisCommand(conn->ac, \"EXISTS %s\", obj_id);\n    if (!reply) {\n        seaf_warning (\"Failed to test object %s from redis cache.\\n\", obj_id);\n        conn->release = TRUE;\n        goto out;\n    }\n    if (reply->type != REDIS_REPLY_INTEGER ||\n        reply->integer != 1) {\n        
if (reply->type == REDIS_REPLY_ERROR) {\n            conn->release = TRUE;\n            seaf_warning (\"Failed to test %s from redis: %s.\\n\",\n                          obj_id, reply->str);\n        }\n        goto out;\n    }\n\n    ret = TRUE;\n\nout:\n    freeReplyObject(reply);\n    redis_connection_pool_return_connection (pool, conn);\n\n    return ret;\n}\n\nint\nredis_cache_delete_object (ObjCache *cache, const char *obj_id)\n{\n    RedisConnection *conn;\n    redisReply *reply;\n    int ret = 0;\n    RedisPriv *priv = cache->priv;\n    RedisConnectionPool *pool = priv->redis_pool;\n\n    conn = redis_connection_pool_get_connection (pool, priv->passwd);\n    if (!conn) {\n        seaf_warning (\"Failed to get redis connection to host %s.\\n\", cache->host);\n        return -1;\n    }\n\n    reply = redisCommand(conn->ac, \"DEL %s\", obj_id);\n    if (!reply) {\n        seaf_warning (\"Failed to delete object %s from redis cache.\\n\", obj_id);\n        ret = -1;\n        conn->release = TRUE;\n        goto out;\n    }\n    if (reply->type != REDIS_REPLY_INTEGER ||\n        reply->integer != 1) {\n        if (reply->type == REDIS_REPLY_ERROR) {\n            conn->release = TRUE;\n            seaf_warning (\"Failed to del %s from redis: %s.\\n\",\n                          obj_id, reply->str);\n        }\n        ret = -1;\n    }\n\nout:\n    freeReplyObject(reply);\n    redis_connection_pool_return_connection (pool, conn);\n\n    return ret;\n}\n\nint\nredis_cache_publish (ObjCache *cache, const char *channel, const char *msg)\n{\n    RedisConnection *conn;\n    redisReply *reply;\n    int ret = 0;\n    RedisPriv *priv = cache->priv;\n    RedisConnectionPool *pool = priv->redis_pool;\n\n    conn = redis_connection_pool_get_connection (pool, priv->passwd);\n    if (!conn) {\n        seaf_warning (\"Failed to get redis connection to host %s.\\n\", cache->host);\n        return -1;\n    }\n\n    reply = redisCommand(conn->ac, \"PUBLISH %s %s\", channel, 
msg);\n    if (!reply) {\n        seaf_warning (\"Failed to publish message to redis channel %s.\\n\", channel);\n        ret = -1;\n        conn->release = TRUE;\n        goto out;\n    }\n    if (reply->type != REDIS_REPLY_INTEGER ||\n        reply->integer < 0) {\n        if (reply->type == REDIS_REPLY_ERROR) {\n            conn->release = TRUE;\n            seaf_warning (\"Failed to publish message to redis channel %s.\\n\", channel);\n        }\n        ret = -1;\n    }\n\nout:\n    freeReplyObject(reply);\n    redis_connection_pool_return_connection (pool, conn);\n\n    return ret;\n}\n\nint\nredis_cache_push (ObjCache *cache, const char *list, const char *msg)\n{\n    RedisConnection *conn;\n    redisReply *reply;\n    int ret = 0;\n    RedisPriv *priv = cache->priv;\n    RedisConnectionPool *pool = priv->redis_pool;\n\n    conn = redis_connection_pool_get_connection (pool, priv->passwd);\n    if (!conn) {\n        seaf_warning (\"Failed to get redis connection to host %s.\\n\", cache->host);\n        return -1;\n    }\n\n    reply = redisCommand(conn->ac, \"LPUSH %s %s\", list, msg);\n    if (!reply) {\n        seaf_warning (\"Failed to push message to redis list %s.\\n\", list);\n        ret = -1;\n        conn->release = TRUE;\n        goto out;\n    }\n    if (reply->type != REDIS_REPLY_INTEGER ||\n        reply->integer < 0) {\n        if (reply->type == REDIS_REPLY_ERROR) {\n            conn->release = TRUE;\n            seaf_warning (\"Failed to push message to redis list %s.\\n\", list);\n        }\n        ret = -1;\n    }\n\nout:\n    freeReplyObject(reply);\n    redis_connection_pool_return_connection (pool, conn);\n\n    return ret;\n}\n\nObjCache *\nredis_cache_new (const char *host, const char *passwd,\n                 int port, int redis_expiry,\n                 int max_connections)\n{\n    ObjCache *cache = g_new0 (ObjCache, 1);\n    RedisPriv *priv = g_new0 (RedisPriv, 1);\n\n    priv->redis_pool = redis_connection_pool_new (host, port, 
max_connections);\n\n    cache->priv = priv;\n\n    cache->host = g_strdup (host);\n    priv->passwd = g_strdup (passwd);\n    cache->port = port;\n    cache->mc_expiry = redis_expiry;\n    cache->cache_type = TYPE_REDIS;\n\n    cache->get_object = redis_cache_get_object;\n    cache->set_object = redis_cache_set_object;\n    cache->test_object = redis_cache_test_object;\n    cache->delete_object = redis_cache_delete_object;\n    cache->publish = redis_cache_publish;\n    cache->push = redis_cache_push;\n\n    return cache;\n}\n"
  },
  {
    "path": "common/redis-cache.h",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#ifndef REDIS_CACHE_H\n#define REDIS_CACHE_H\n\n#include \"obj-cache.h\"\n\nObjCache *\nredis_cache_new (const char *host, const char *passwd,\n                 int port, int mc_expiry,\n                 int max_connections);\n\n\n#endif\n"
  },
  {
    "path": "common/rpc-service.c",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#include \"common.h\"\n#include <glib/gstdio.h>\n#include <ctype.h>\n\n#include <sys/stat.h>\n#include <dirent.h>\n#include \"utils.h\"\n\n#include \"seafile-session.h\"\n#include \"seaf-utils.h\"\n#include \"fs-mgr.h\"\n#include \"repo-mgr.h\"\n#include \"seafile-error.h\"\n#include \"seafile-rpc.h\"\n#include \"mq-mgr.h\"\n#include \"password-hash.h\"\n\n#ifdef SEAFILE_SERVER\n#include \"web-accesstoken-mgr.h\"\n#endif\n\n#ifndef SEAFILE_SERVER\n#include \"seafile-config.h\"\n#endif\n\n#define DEBUG_FLAG SEAFILE_DEBUG_OTHER\n#include \"log.h\"\n\n#ifndef SEAFILE_SERVER\n#include \"../daemon/vc-utils.h\"\n\n#endif  /* SEAFILE_SERVER */\n\n\n/* -------- Utilities -------- */\nstatic GObject*\nconvert_repo (SeafRepo *r)\n{\n    SeafileRepo *repo = NULL;\n\n#ifndef SEAFILE_SERVER\n    if (r->head == NULL)\n        return NULL;\n\n    if (r->worktree_invalid && !seafile_session_config_get_allow_invalid_worktree(seaf))\n        return NULL;\n#endif\n\n    repo = seafile_repo_new ();\n    if (!repo)\n        return NULL;\n\n    g_object_set (repo, \"id\", r->id, \"name\", r->name,\n                  \"desc\", r->desc, \"encrypted\", r->encrypted,\n                  \"magic\", r->magic, \"enc_version\", r->enc_version,\n                  \"pwd_hash\", r->pwd_hash,\n                  \"pwd_hash_algo\", r->pwd_hash_algo, \"pwd_hash_params\", r->pwd_hash_params,\n                  \"head_cmmt_id\", r->head ? 
r->head->commit_id : NULL,\n                  \"root\", r->root_id,\n                  \"version\", r->version, \"last_modify\", r->last_modify,\n                  \"last_modifier\", r->last_modifier,\n                  NULL);\n    g_object_set (repo,\n                  \"repo_id\", r->id, \"repo_name\", r->name,\n                  \"repo_desc\", r->desc, \"last_modified\", r->last_modify,\n                  \"status\", r->status,\n                  \"repo_type\", r->type,\n                  NULL);\n\n#ifdef SEAFILE_SERVER\n    if (r->virtual_info) {\n        g_object_set (repo,\n                      \"is_virtual\", TRUE,\n                      \"origin_repo_id\", r->virtual_info->origin_repo_id,\n                      \"origin_path\", r->virtual_info->path,\n                      NULL);\n    }\n\n    if (r->encrypted) {\n        if (r->enc_version >= 2)\n            g_object_set (repo, \"random_key\", r->random_key, NULL);\n        if (r->enc_version >= 3)\n            g_object_set (repo, \"salt\", r->salt, NULL);\n    }\n\n    g_object_set (repo, \"store_id\", r->store_id,\n                  \"repaired\", r->repaired,\n                  \"size\", r->size, \"file_count\", r->file_count, NULL);\n    g_object_set (repo, \"is_corrupted\", r->is_corrupted, NULL);\n#endif\n\n#ifndef SEAFILE_SERVER\n    g_object_set (repo, \"worktree\", r->worktree,\n                  \"relay-id\", r->relay_id,\n                  \"worktree-invalid\", r->worktree_invalid,\n                  \"last-sync-time\", r->last_sync_time,\n                  \"auto-sync\", r->auto_sync,\n                  NULL);\n\n#endif  /* SEAFILE_SERVER */\n\n    return (GObject *)repo;\n}\n\nstatic void\nfree_repo_obj (gpointer repo)\n{\n    if (!repo)\n        return;\n    g_object_unref ((GObject *)repo);\n}\n\nstatic GList *\nconvert_repo_list (GList *inner_repos)\n{\n    GList *ret = NULL, *ptr;\n    GObject *repo = NULL;\n\n    for (ptr = inner_repos; ptr; ptr=ptr->next) {\n        SeafRepo *r = 
ptr->data;\n        repo = convert_repo (r);\n        if (!repo) {\n            g_list_free_full (ret, free_repo_obj);\n            return NULL;\n        }\n\n        ret = g_list_prepend (ret, repo);\n    }\n\n    return g_list_reverse (ret);\n}\n\n/*\n * RPC functions available for both clients and server.\n */\n\nGList *\nseafile_branch_gets (const char *repo_id, GError **error)\n{\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return NULL;\n    }\n\n    GList *blist = seaf_branch_manager_get_branch_list(seaf->branch_mgr,\n                                                       repo_id);\n    GList *ptr;\n    GList *ret = NULL;\n\n    for (ptr = blist; ptr; ptr=ptr->next) {\n        SeafBranch *b = ptr->data;\n        SeafileBranch *branch = seafile_branch_new ();\n        g_object_set (branch, \"repo_id\", b->repo_id, \"name\", b->name,\n                      \"commit_id\", b->commit_id, NULL);\n        ret = g_list_prepend (ret, branch);\n        seaf_branch_unref (b);\n    }\n    ret = g_list_reverse (ret);\n    g_list_free (blist);\n    return ret;\n}\n\n#ifdef SEAFILE_SERVER\nGList*\nseafile_get_trash_repo_list (int start, int limit, GError **error)\n{\n    return seaf_repo_manager_get_trash_repo_list (seaf->repo_mgr,\n                                                  start, limit,\n                                                  error);\n}\n\nGList *\nseafile_get_trash_repos_by_owner (const char *owner, GError **error)\n{\n    if (!owner) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Argument should not be null\");\n        return NULL;\n    }\n\n    return seaf_repo_manager_get_trash_repos_by_owner (seaf->repo_mgr,\n                                                       owner,\n                                                       error);\n}\n\nint\nseafile_del_repo_from_trash (const char *repo_id, GError **error)\n{\n    int ret = 0;\n\n    if 
(!repo_id) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Argument should not be null\");\n        return -1;\n    }\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return -1;\n    }\n\n    ret = seaf_repo_manager_del_repo_from_trash (seaf->repo_mgr, repo_id, error);\n\n    return ret;\n}\n\nint\nseafile_empty_repo_trash (GError **error)\n{\n    return seaf_repo_manager_empty_repo_trash (seaf->repo_mgr, error);\n}\n\nint\nseafile_empty_repo_trash_by_owner (const char *owner, GError **error)\n{\n    if (!owner) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Argument should not be null\");\n        return -1;\n    }\n\n    return seaf_repo_manager_empty_repo_trash_by_owner (seaf->repo_mgr, owner, error);\n}\n\nint\nseafile_restore_repo_from_trash (const char *repo_id, GError **error)\n{\n    int ret = 0;\n\n    if (!repo_id) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Argument should not be null\");\n        return -1;\n    }\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return -1;\n    }\n\n    ret = seaf_repo_manager_restore_repo_from_trash (seaf->repo_mgr, repo_id, error);\n\n    return ret;\n}\n\nint\nseafile_publish_event(const char *channel, const char *content, GError **error)\n{\n    int ret = 0;\n\n    if (!channel || !content) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Argument should not be null\");                                                          \n        return -1;\n    }\n\n    ret = seaf_mq_manager_publish_event (seaf->mq_mgr, channel, content);\n\n    return ret;\n}\n\njson_t *\nseafile_pop_event(const char *channel, GError **error)\n{\n    if (!channel) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Argument should not be null\");\n        return NULL;\n    }\n    
return seaf_mq_manager_pop_event (seaf->mq_mgr, channel);\n}\n#endif\n\nGList*\nseafile_get_repo_list (int start, int limit, const char *order_by, int ret_virt_repo, GError **error)\n{\n    GList *repos = seaf_repo_manager_get_repo_list(seaf->repo_mgr, start, limit, order_by, ret_virt_repo);\n    GList *ret = NULL;\n\n    ret = convert_repo_list (repos);\n\n#ifdef SEAFILE_SERVER\n    GList *ptr;\n    for (ptr = repos; ptr != NULL; ptr = ptr->next)\n        seaf_repo_unref ((SeafRepo *)ptr->data);\n#endif\n    g_list_free (repos);\n\n    return ret;\n}\n\n#ifdef SEAFILE_SERVER\ngint64\nseafile_count_repos (GError **error)\n{\n    return seaf_repo_manager_count_repos (seaf->repo_mgr, error);\n}\n#endif\n\nGObject*\nseafile_get_repo (const char *repo_id, GError **error)\n{\n    SeafRepo *r;\n\n    if (!repo_id) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Argument should not be null\");\n        return NULL;\n    }\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return NULL;\n    }\n\n    r = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);\n    /* Don't return repo that's not checked out. 
*/\n    if (r == NULL)\n        return NULL;\n\n    GObject *repo = convert_repo (r);\n\n#ifdef SEAFILE_SERVER\n    seaf_repo_unref (r);\n#endif\n\n    return repo;\n}\n\nSeafileCommit *\nconvert_to_seafile_commit (SeafCommit *c)\n{\n    SeafileCommit *commit = seafile_commit_new ();\n    g_object_set (commit,\n                  \"id\", c->commit_id,\n                  \"creator_name\", c->creator_name,\n                  \"creator\", c->creator_id,\n                  \"desc\", c->desc,\n                  \"ctime\", c->ctime,\n                  \"repo_id\", c->repo_id,\n                  \"root_id\", c->root_id,\n                  \"parent_id\", c->parent_id,\n                  \"second_parent_id\", c->second_parent_id,\n                  \"version\", c->version,\n                  \"new_merge\", c->new_merge,\n                  \"conflict\", c->conflict,\n                  \"device_name\", c->device_name,\n                  \"client_version\", c->client_version,\n                  NULL);\n    return commit;\n}\n\nGObject*\nseafile_get_commit (const char *repo_id, int version,\n                    const gchar *id, GError **error)\n{\n    SeafileCommit *commit;\n    SeafCommit *c;\n\n    if (!repo_id || !is_uuid_valid(repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return NULL;\n    }\n\n    if (!id || !is_object_id_valid(id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid commit id\");\n        return NULL;\n    }\n\n    c = seaf_commit_manager_get_commit (seaf->commit_mgr, repo_id, version, id);\n    if (!c)\n        return NULL;\n\n    commit = convert_to_seafile_commit (c);\n    seaf_commit_unref (c);\n    return (GObject *)commit;\n}\n\nstruct CollectParam {\n    int offset;\n    int limit;\n    int count;\n    GList *commits;\n#ifdef SEAFILE_SERVER\n    gint64 truncate_time;\n    gboolean traversed_head;\n#endif\n};\n\nstatic gboolean\nget_commit (SeafCommit *c, void *data, 
gboolean *stop)\n{\n    struct CollectParam *cp = data;\n\n#ifdef SEAFILE_SERVER\n    if (cp->truncate_time == 0)\n    {\n        *stop = TRUE;\n        /* Stop after traversing the head commit. */\n    }\n    /* We use <= here. This is for handling clean trash and history.\n     * If the user cleans all history, truncate time will be equal to\n     * the commit's ctime. In such case, we don't actually want to display\n     * this commit.\n     */\n    else if (cp->truncate_time > 0 &&\n             (gint64)(c->ctime) <= cp->truncate_time &&\n             cp->traversed_head)\n    {\n        /* Still traverse the first commit older than truncate_time.\n         * If a file in the child commit of this commit is deleted,\n         * we need to access this commit in order to restore it\n         * from trash.\n         */\n        *stop = TRUE;\n    }\n\n    /* Always traverse the head commit. */\n    if (!cp->traversed_head)\n        cp->traversed_head = TRUE;\n#endif\n\n    /* if offset = 1, limit = 1, we should stop when the count = 2 */\n    if (cp->limit > 0 && cp->count >= cp->offset + cp->limit) {\n        *stop = TRUE;\n        return TRUE;  /* TRUE to indicate no error */\n    }\n\n    if (cp->count >= cp->offset) {\n        SeafileCommit *commit = convert_to_seafile_commit (c);\n        cp->commits = g_list_prepend (cp->commits, commit);\n    }\n\n    ++cp->count;\n    return TRUE;                /* TRUE to indicate no error */\n}\n\n\nGList*\nseafile_get_commit_list (const char *repo_id,\n                         int offset,\n                         int limit,\n                         GError **error)\n{\n    SeafRepo *repo;\n    GList *commits = NULL;\n    gboolean ret;\n    struct CollectParam cp;\n    char *commit_id;\n\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return NULL;\n    }\n\n    /* correct parameter */\n    if (offset < 0)\n        offset = 0;\n\n    if 
(!repo_id) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Argument should not be null\");\n        return NULL;\n    }\n\n    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);\n    if (!repo) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_REPO, \"No such repository\");\n        return NULL;\n    }\n\n    if (!repo->head) {\n        SeafBranch *branch =\n            seaf_branch_manager_get_branch (seaf->branch_mgr,\n                                            repo->id, \"master\");\n        if (branch != NULL) {\n            commit_id = g_strdup (branch->commit_id);\n            seaf_branch_unref (branch);\n        } else {\n            seaf_warning (\"[repo-mgr] Failed to get repo %s branch master\\n\",\n                       repo_id);\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_REPO,\n                         \"No head and branch master\");\n#ifdef SEAFILE_SERVER\n            seaf_repo_unref (repo);\n#endif\n            return NULL;\n        }\n    } else {\n        commit_id = g_strdup (repo->head->commit_id);\n    }\n\n    /* Init CollectParam */\n    memset (&cp, 0, sizeof(cp));\n    cp.offset = offset;\n    cp.limit = limit;\n\n#ifdef SEAFILE_SERVER\n    cp.truncate_time = seaf_repo_manager_get_repo_truncate_time (seaf->repo_mgr,\n                                                                 repo_id);\n#endif\n\n    ret =\n        seaf_commit_manager_traverse_commit_tree (seaf->commit_mgr,\n                                                  repo->id, repo->version,\n                                                  commit_id, get_commit, &cp, TRUE);\n    g_free (commit_id);\n#ifdef SEAFILE_SERVER\n    seaf_repo_unref (repo);\n#endif\n\n    if (!ret) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_LIST_COMMITS, \"Failed to list commits\");\n        return NULL;\n    }\n\n    commits = g_list_reverse (cp.commits);\n    return commits;\n}\n\n#ifndef SEAFILE_SERVER\nstatic\nint 
do_unsync_repo(SeafRepo *repo)\n{\n    if (!seaf->started) {\n        seaf_message (\"System not started, skip removing repo.\\n\");\n        return -1;\n    }\n\n    if (repo->auto_sync && (repo->sync_interval == 0))\n        seaf_wt_monitor_unwatch_repo (seaf->wt_monitor, repo->id);\n\n    seaf_sync_manager_cancel_sync_task (seaf->sync_mgr, repo->id);\n\n    SyncInfo *info = seaf_sync_manager_get_sync_info (seaf->sync_mgr, repo->id);\n\n    /* If we are syncing the repo,\n     * we just mark the repo as deleted and let sync-mgr actually delete it.\n     * Otherwise we are safe to delete the repo.\n     */\n    char *worktree = g_strdup (repo->worktree);\n    if (info != NULL && info->in_sync) {\n        seaf_repo_manager_mark_repo_deleted (seaf->repo_mgr, repo);\n    } else {\n        seaf_repo_manager_del_repo (seaf->repo_mgr, repo);\n    }\n\n    g_free (worktree);\n\n    return 0;\n}\n\nstatic void\ncancel_clone_tasks_by_account (const char *account_server, const char *account_email)\n{\n    GList *ptr, *tasks;\n    CloneTask *task;\n\n    tasks = seaf_clone_manager_get_tasks (seaf->clone_mgr);\n    for (ptr = tasks; ptr != NULL; ptr = ptr->next) {\n        task = ptr->data;\n\n        if (g_strcmp0(account_server, task->peer_addr) == 0\n            && g_strcmp0(account_email, task->email) == 0) {\n            seaf_clone_manager_cancel_task (seaf->clone_mgr, task->repo_id);\n        }\n    }\n\n    g_list_free (tasks);\n}\n\nint\nseafile_unsync_repos_by_account (const char *server_addr, const char *email, GError **error)\n{\n    if (!server_addr || !email) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Argument should not be null\");\n        return -1;\n    }\n\n    GList *ptr, *repos = seaf_repo_manager_get_repo_list(seaf->repo_mgr, -1, -1, NULL, 0);\n    if (!repos) {\n        return 0;\n    }\n\n    for (ptr = repos; ptr; ptr = ptr->next) {\n        SeafRepo *repo = (SeafRepo*)ptr->data;\n        char *addr = NULL;\n        
seaf_repo_manager_get_repo_relay_info(seaf->repo_mgr,\n                                              repo->id,\n                                              &addr, /* addr */\n                                              NULL); /* port */\n\n        if (g_strcmp0(addr, server_addr) == 0 && g_strcmp0(repo->email, email) == 0) {\n            if (do_unsync_repo(repo) < 0) {\n                return -1;\n            }\n        }\n\n        g_free (addr);\n    }\n\n    g_list_free (repos);\n\n    cancel_clone_tasks_by_account (server_addr, email);\n\n    return 0;\n}\n\nint\nseafile_remove_repo_tokens_by_account (const char *server_addr, const char *email, GError **error)\n{\n    if (!server_addr || !email) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Argument should not be null\");\n        return -1;\n    }\n\n    GList *ptr, *repos = seaf_repo_manager_get_repo_list(seaf->repo_mgr, -1, -1, NULL, 0);\n    if (!repos) {\n        return 0;\n    }\n\n    for (ptr = repos; ptr; ptr = ptr->next) {\n        SeafRepo *repo = (SeafRepo*)ptr->data;\n        char *addr = NULL;\n        seaf_repo_manager_get_repo_relay_info(seaf->repo_mgr,\n                                              repo->id,\n                                              &addr, /* addr */\n                                              NULL); /* port */\n\n        if (g_strcmp0(addr, server_addr) == 0 && g_strcmp0(repo->email, email) == 0) {\n            if (seaf_repo_manager_remove_repo_token(seaf->repo_mgr, repo) < 0) {\n                return -1;\n            }\n        }\n\n        g_free (addr);\n    }\n\n    g_list_free (repos);\n\n    cancel_clone_tasks_by_account (server_addr, email);\n\n    return 0;\n}\n\nint\nseafile_set_repo_token (const char *repo_id,\n                        const char *token,\n                        GError **error)\n{\n    int ret;\n\n    if (repo_id == NULL || token == NULL) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Arguments 
should not be empty\");\n        return -1;\n    }\n\n    SeafRepo *repo;\n    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);\n    if (!repo) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_REPO, \"Can't find Repo %s\", repo_id);\n        return -1;\n    }\n\n    ret = seaf_repo_manager_set_repo_token (seaf->repo_mgr,\n                                            repo, token);\n    if (ret < 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_INTERNAL,\n                     \"Failed to set token for repo %s\", repo_id);\n        return -1;\n    }\n\n    return 0;\n}\n\n#endif\n\nint\nseafile_destroy_repo (const char *repo_id, GError **error)\n{\n    if (!repo_id) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Argument should not be null\");\n        return -1;\n    }\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return -1;\n    }\n\n#ifndef SEAFILE_SERVER\n    SeafRepo *repo;\n\n    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);\n    if (!repo) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"No such repository\");\n        return -1;\n    }\n\n    return do_unsync_repo(repo);\n#else\n\n    return seaf_repo_manager_del_repo (seaf->repo_mgr, repo_id, error);\n#endif\n}\n\n\nGObject *\nseafile_generate_magic_and_random_key(int enc_version,\n                                      const char* repo_id,\n                                      const char *passwd,\n                                      GError **error)\n{\n    if (!repo_id || !passwd) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Argument should not be null\");\n        return NULL;\n    }\n\n    gchar salt[65] = {0};\n    gchar magic[65] = {0};\n    gchar pwd_hash[65] = {0};\n    gchar random_key[97] = {0};\n\n    if (enc_version >= 3 && seafile_generate_repo_salt (salt) < 0) {\n        return NULL;\n    }\n\n    
seafile_generate_magic (enc_version, repo_id, passwd, salt, magic);\n    if (seafile_generate_random_key (passwd, enc_version, salt, random_key) < 0) {\n        return NULL;\n    }\n\n    SeafileEncryptionInfo *sinfo;\n    sinfo = g_object_new (SEAFILE_TYPE_ENCRYPTION_INFO,\n                          \"repo_id\", repo_id,\n                          \"passwd\", passwd,\n                          \"enc_version\", enc_version,\n                          \"magic\", magic,\n                          \"random_key\", random_key,\n                          NULL);\n    if (enc_version >= 3)\n        g_object_set (sinfo, \"salt\", salt, NULL);\n\n    return (GObject *)sinfo;\n\n}\n\n#include \"diff-simple.h\"\n\ninline static const char*\nget_diff_status_str(char status)\n{\n    if (status == DIFF_STATUS_ADDED)\n        return \"add\";\n    if (status == DIFF_STATUS_DELETED)\n        return \"del\";\n    if (status == DIFF_STATUS_MODIFIED)\n        return \"mod\";\n    if (status == DIFF_STATUS_RENAMED)\n        return \"mov\";\n    if (status == DIFF_STATUS_DIR_ADDED)\n        return \"newdir\";\n    if (status == DIFF_STATUS_DIR_DELETED)\n        return \"deldir\";\n    return NULL;\n}\n\nGList *\nseafile_diff (const char *repo_id, const char *arg1, const char *arg2, int fold_dir_results, GError **error)\n{\n    SeafRepo *repo;\n    char *err_msgs = NULL;\n    GList *diff_entries, *p;\n    GList *ret = NULL;\n\n    if (!repo_id || !arg1 || !arg2) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Argument should not be null\");\n        return NULL;\n    }\n\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return NULL;\n    }\n\n    if ((arg1[0] != 0 && !is_object_id_valid (arg1)) || !is_object_id_valid(arg2)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid commit id\");\n        return NULL;\n    }\n\n    repo = seaf_repo_manager_get_repo 
(seaf->repo_mgr, repo_id);\n    if (!repo) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"No such repository\");\n        return NULL;\n    }\n\n    diff_entries = seaf_repo_diff (repo, arg1, arg2, fold_dir_results, &err_msgs);\n    if (err_msgs) {\n        g_set_error (error, SEAFILE_DOMAIN, -1, \"%s\", err_msgs);\n        g_free (err_msgs);\n#ifdef SEAFILE_SERVER\n        seaf_repo_unref (repo);\n#endif\n        return NULL;\n    }\n\n#ifdef SEAFILE_SERVER\n    seaf_repo_unref (repo);\n#endif\n\n    for (p = diff_entries; p != NULL; p = p->next) {\n        DiffEntry *de = p->data;\n        SeafileDiffEntry *entry = g_object_new (\n            SEAFILE_TYPE_DIFF_ENTRY,\n            \"status\", get_diff_status_str(de->status),\n            \"name\", de->name,\n            \"new_name\", de->new_name,\n            NULL);\n        ret = g_list_prepend (ret, entry);\n    }\n\n    for (p = diff_entries; p != NULL; p = p->next) {\n        DiffEntry *de = p->data;\n        diff_entry_free (de);\n    }\n    g_list_free (diff_entries);\n\n    return g_list_reverse (ret);\n}\n\n/*\n * RPC functions only available for server.\n */\n\n#ifdef SEAFILE_SERVER\n\nGList *\nseafile_list_dir_by_path(const char *repo_id,\n                         const char *commit_id,\n                         const char *path, GError **error)\n{\n    SeafRepo *repo = NULL;\n    SeafCommit *commit = NULL;\n    SeafDir *dir;\n    SeafDirent *dent;\n    SeafileDirent *d;\n\n    GList *ptr;\n    GList *res = NULL;\n\n    if (!repo_id || !commit_id || !path) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Args can't be NULL\");\n        return NULL;\n    }\n\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid repo id\");\n        return NULL;\n    }\n\n    if (!is_object_id_valid (commit_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n      
               \"Invalid commit id\");\n        return NULL;\n    }\n\n    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);\n    if (!repo) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Bad repo id\");\n        return NULL;\n    }\n\n    commit = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                                             repo_id, repo->version,\n                                             commit_id);\n\n    if (!commit) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_COMMIT, \"No such commit\");\n        goto out;\n    }\n\n    char *rpath = format_dir_path (path);\n    dir = seaf_fs_manager_get_seafdir_by_path (seaf->fs_mgr,\n                                               repo->store_id,\n                                               repo->version,\n                                               commit->root_id,\n                                               rpath, error);\n    g_free (rpath);\n\n    if (!dir) {\n        seaf_warning (\"Can't find seaf dir for %s in repo %s\\n\", path, repo->store_id);\n        goto out;\n    }\n\n    for (ptr = dir->entries; ptr != NULL; ptr = ptr->next) {\n        dent = ptr->data;\n\n        if (!is_object_id_valid (dent->id))\n            continue;\n\n        d = g_object_new (SEAFILE_TYPE_DIRENT,\n                          \"obj_id\", dent->id,\n                          \"obj_name\", dent->name,\n                          \"mode\", dent->mode,\n                          \"version\", dent->version,\n                          \"mtime\", dent->mtime,\n                          \"size\", dent->size,\n                          NULL);\n        res = g_list_prepend (res, d);\n    }\n\n    seaf_dir_free (dir);\n    res = g_list_reverse (res);\n\nout:\n    seaf_repo_unref (repo);\n    seaf_commit_unref (commit);\n    return res;\n}\n\nstatic void\nfilter_error (GError **error)\n{\n    if (*error && g_error_matches(*error,\n                                  
SEAFILE_DOMAIN,\n                                  SEAF_ERR_PATH_NO_EXIST)) {\n        g_clear_error (error);\n    }\n}\n\nchar *\nseafile_get_dir_id_by_commit_and_path(const char *repo_id,\n                                      const char *commit_id,\n                                      const char *path,\n                                      GError **error)\n{\n    SeafRepo *repo = NULL;\n    char *res = NULL;\n    SeafCommit *commit = NULL;\n    SeafDir *dir;\n\n    if (!repo_id || !commit_id || !path) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Args can't be NULL\");\n        return NULL;\n    }\n\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid repo id\");\n        return NULL;\n    }\n\n    if (!is_object_id_valid (commit_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid commit id\");\n        return NULL;\n    }\n\n    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);\n    if (!repo) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Bad repo id\");\n        return NULL;\n    }\n\n    commit = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                                             repo_id, repo->version,\n                                             commit_id);\n\n    if (!commit) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_COMMIT, \"No such commit\");\n        goto out;\n    }\n\n    char *rpath = format_dir_path (path);\n\n    dir = seaf_fs_manager_get_seafdir_by_path (seaf->fs_mgr,\n                                               repo->store_id,\n                                               repo->version,\n                                               commit->root_id,\n                                               rpath, error);\n    g_free (rpath);\n\n    if (!dir) {\n        seaf_warning (\"Can't find seaf dir for %s 
in repo %s\\n\", path, repo->store_id);\n        filter_error (error);\n        goto out;\n    }\n\n    res = g_strdup (dir->dir_id);\n    seaf_dir_free (dir);\n\n out:\n    seaf_repo_unref (repo);\n    seaf_commit_unref (commit);\n    return res;\n}\n\nint\nseafile_edit_repo (const char *repo_id,\n                   const char *name,\n                   const char *description,\n                   const char *user,\n                   GError **error)\n{\n    return seaf_repo_manager_edit_repo (repo_id, name, description, user, error);\n}\n\nint\nseafile_change_repo_passwd (const char *repo_id,\n                            const char *old_passwd,\n                            const char *new_passwd,\n                            const char *user,\n                            GError **error)\n{\n    SeafRepo *repo = NULL;\n    SeafCommit *commit = NULL, *parent = NULL;\n    int ret = 0;\n\n    if (!user) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"No user given\");\n        return -1;\n    }\n\n    if (!old_passwd || old_passwd[0] == 0 || !new_passwd || new_passwd[0] == 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Empty passwd\");\n        return -1;\n    }\n\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return -1;\n    }\n\nretry:\n    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);\n    if (!repo) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"No such library\");\n        return -1;\n    }\n\n    if (!repo->encrypted) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Repo not encrypted\");\n        return -1;\n    }\n\n    if (repo->enc_version < 2) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Unsupported enc version\");\n        return -1;\n    }\n\n    if (repo->pwd_hash_algo) {\n        if 
(seafile_pwd_hash_verify_repo_passwd (repo->enc_version, repo_id, old_passwd, repo->salt,\n                                                 repo->pwd_hash, repo->pwd_hash_algo, repo->pwd_hash_params) < 0) {\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Incorrect password\");\n            return -1;\n        }\n    } else {\n        if (seafile_verify_repo_passwd (repo_id, old_passwd, repo->magic,\n                                        repo->enc_version, repo->salt) < 0) {\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Incorrect password\");\n            return -1;\n        }\n    }\n\n    parent = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                                             repo->id, repo->version,\n                                             repo->head->commit_id);\n    if (!parent) {\n        seaf_warning (\"Failed to get commit %s:%s.\\n\",\n                      repo->id, repo->head->commit_id);\n        ret = -1;\n        goto out;\n    }\n\n    char new_magic[65], new_pwd_hash[65], new_random_key[97];\n\n    if (repo->pwd_hash_algo) {\n        seafile_generate_pwd_hash (repo->enc_version, repo_id, new_passwd, repo->salt,\n                                   repo->pwd_hash_algo, repo->pwd_hash_params, new_pwd_hash);\n    } else {\n        seafile_generate_magic (repo->enc_version, repo_id, new_passwd, repo->salt,\n                                new_magic);\n    }\n    if (seafile_update_random_key (old_passwd, repo->random_key,\n                                   new_passwd, new_random_key,\n                                   repo->enc_version, repo->salt) < 0) {\n        ret = -1;\n        goto out;\n    }\n\n    if (repo->pwd_hash_algo) {\n        memcpy (repo->pwd_hash, new_pwd_hash, 64);\n    } else {\n        memcpy (repo->magic, new_magic, 64);\n    }\n    memcpy (repo->random_key, new_random_key, 96);\n\n    commit = seaf_commit_new (NULL,\n                              repo->id,\n      
                        parent->root_id,\n                              user,\n                              EMPTY_SHA1,\n                              \"Changed library password\",\n                              0);\n    commit->parent_id = g_strdup(parent->commit_id);\n    seaf_repo_to_commit (repo, commit);\n\n    if (seaf_commit_manager_add_commit (seaf->commit_mgr, commit) < 0) {\n        ret = -1;\n        goto out;\n    }\n\n    seaf_branch_set_commit (repo->head, commit->commit_id);\n    if (seaf_branch_manager_test_and_update_branch (seaf->branch_mgr,\n                                                    repo->head,\n                                                    parent->commit_id,\n                                                    FALSE, NULL, NULL, NULL) < 0) {\n        seaf_repo_unref (repo);\n        seaf_commit_unref (commit);\n        seaf_commit_unref (parent);\n        repo = NULL;\n        commit = NULL;\n        parent = NULL;\n        goto retry;\n    }\n\n    if (seaf_passwd_manager_is_passwd_set (seaf->passwd_mgr, repo_id, user))\n        seaf_passwd_manager_set_passwd (seaf->passwd_mgr, repo_id,\n                                        user, new_passwd, error);\n\nout:\n    seaf_commit_unref (commit);\n    seaf_commit_unref (parent);\n    seaf_repo_unref (repo);\n\n    return ret;\n}\n\nstatic void\nset_pwd_hash_to_commit (SeafCommit *commit,\n                        SeafRepo *repo,\n                        const char *pwd_hash,\n                        const char *pwd_hash_algo,\n                        const char *pwd_hash_params)\n{\n    commit->repo_name = g_strdup (repo->name);\n    commit->repo_desc = g_strdup (repo->desc);\n    commit->encrypted = repo->encrypted;\n    commit->repaired = repo->repaired;\n    if (commit->encrypted) {\n        commit->enc_version = repo->enc_version;\n        if (commit->enc_version == 2) {\n            commit->random_key = g_strdup (repo->random_key);\n        } else if (commit->enc_version == 3) 
{\n            commit->random_key = g_strdup (repo->random_key);\n            commit->salt = g_strdup (repo->salt);\n        } else if (commit->enc_version == 4) {\n            commit->random_key = g_strdup (repo->random_key);\n            commit->salt = g_strdup (repo->salt);\n        }\n        commit->pwd_hash = g_strdup (pwd_hash);\n        commit->pwd_hash_algo = g_strdup (pwd_hash_algo);\n        commit->pwd_hash_params = g_strdup (pwd_hash_params);\n    }\n    commit->no_local_history = repo->no_local_history;\n    commit->version = repo->version;\n}\n\nint\nseafile_upgrade_repo_pwd_hash_algorithm (const char *repo_id,\n                                         const char *user,\n                                         const char *passwd,\n                                         const char *pwd_hash_algo,\n                                         const char *pwd_hash_params,\n                                         GError **error)\n{\n    SeafRepo *repo = NULL;\n    SeafCommit *commit = NULL, *parent = NULL;\n    int ret = 0;\n\n    if (!user) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"No user given\");\n        return -1;\n    }\n\n    if (!passwd || passwd[0] == 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Empty passwd\");\n        return -1;\n    }\n\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return -1;\n    }\n\n    if (!pwd_hash_algo) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid pwd hash algorithm\");\n        return -1;\n    }\n\n    if (g_strcmp0 (pwd_hash_algo, PWD_HASH_PDKDF2) != 0 &&\n        g_strcmp0 (pwd_hash_algo, PWD_HASH_ARGON2ID) != 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Unsupported pwd hash algorithm\");\n        return -1;\n    }\n\n    if (!pwd_hash_params) {\n        g_set_error (error, 
SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid pwd hash params\");\n        return -1;\n    }\n\nretry:\n    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);\n    if (!repo) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"No such library\");\n        return -1;\n    }\n\n    if (g_strcmp0 (pwd_hash_algo, repo->pwd_hash_algo) == 0 &&\n        g_strcmp0 (pwd_hash_params, repo->pwd_hash_params) == 0) {\n        goto out;\n    }\n\n    if (!repo->encrypted) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Repo not encrypted\");\n        ret = -1;\n        goto out;\n    }\n\n    if (repo->pwd_hash_algo) {\n        if (seafile_pwd_hash_verify_repo_passwd (repo->enc_version, repo_id, passwd, repo->salt,\n                                                 repo->pwd_hash, repo->pwd_hash_algo, repo->pwd_hash_params) < 0) {\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Incorrect password\");\n            ret = 1;\n            goto out;\n        }\n    } else {\n        if (seafile_verify_repo_passwd (repo_id, passwd, repo->magic,\n                                        repo->enc_version, repo->salt) < 0) {\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Incorrect password\");\n            ret = -1;\n            goto out;\n        }\n    }\n\n    parent = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                                             repo->id, repo->version,\n                                             repo->head->commit_id);\n    if (!parent) {\n        seaf_warning (\"Failed to get commit %s:%s.\\n\",\n                      repo->id, repo->head->commit_id);\n        ret = -1;\n        goto out;\n    }\n\n    char new_pwd_hash[65]= {0};\n\n    seafile_generate_pwd_hash (repo->enc_version, repo_id, passwd, repo->salt,\n                               pwd_hash_algo, pwd_hash_params, new_pwd_hash);\n\n    // To prevent clients that have already synced this repo from 
overwriting the modified encryption algorithm,\n    // delete all sync tokens.\n    if (seaf_delete_repo_tokens (repo) < 0) {\n        seaf_warning (\"Failed to delete repo sync tokens, abort change pwd hash algorithm.\\n\");\n        ret = -1;\n        goto out;\n    }\n\n    memcpy (repo->pwd_hash, new_pwd_hash, 64);\n\n    commit = seaf_commit_new (NULL,\n                              repo->id,\n                              parent->root_id,\n                              user,\n                              EMPTY_SHA1,\n                              \"Changed library password hash algorithm\",\n                              0);\n    commit->parent_id = g_strdup(parent->commit_id);\n    set_pwd_hash_to_commit (commit, repo, new_pwd_hash, pwd_hash_algo, pwd_hash_params);\n\n    if (seaf_commit_manager_add_commit (seaf->commit_mgr, commit) < 0) {\n        ret = -1;\n        goto out;\n    }\n\n    seaf_branch_set_commit (repo->head, commit->commit_id);\n    if (seaf_branch_manager_test_and_update_branch (seaf->branch_mgr,\n                                                    repo->head,\n                                                    parent->commit_id,\n                                                    FALSE, NULL, NULL, NULL) < 0) {\n        seaf_repo_unref (repo);\n        seaf_commit_unref (commit);\n        seaf_commit_unref (parent);\n        repo = NULL;\n        commit = NULL;\n        parent = NULL;\n        goto retry;\n    }\n\n    if (seaf_passwd_manager_is_passwd_set (seaf->passwd_mgr, repo_id, user))\n        seaf_passwd_manager_set_passwd (seaf->passwd_mgr, repo_id,\n                                        user, passwd, error);\n\nout:\n    seaf_commit_unref (commit);\n    seaf_commit_unref (parent);\n    seaf_repo_unref (repo);\n\n    return ret;\n}\n\nint\nseafile_is_repo_owner (const char *email,\n                       const char *repo_id,\n                       GError **error)\n{\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error 
(error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return 0;\n    }\n\n    char *owner = seaf_repo_manager_get_repo_owner (seaf->repo_mgr, repo_id);\n    if (!owner) {\n        /* seaf_warning (\"Failed to get owner info for repo %s.\\n\", repo_id); */\n        return 0;\n    }\n\n    if (strcmp(owner, email) != 0) {\n        g_free (owner);\n        return 0;\n    }\n\n    g_free (owner);\n    return 1;\n}\n\nint\nseafile_set_repo_owner(const char *repo_id, const char *email,\n                       GError **error)\n{\n    if (!repo_id || !email) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Argument should not be null\");\n        return -1;\n    }\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return -1;\n    }\n\n    return seaf_repo_manager_set_repo_owner(seaf->repo_mgr, repo_id, email);\n}\n\nchar *\nseafile_get_repo_owner (const char *repo_id, GError **error)\n{\n    if (!repo_id) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Argument should not be null\");\n        return NULL;\n    }\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return NULL;\n    }\n\n    char *owner = seaf_repo_manager_get_repo_owner (seaf->repo_mgr, repo_id);\n    /* if (!owner){ */\n    /*     seaf_warning (\"Failed to get repo owner for repo %s.\\n\", repo_id); */\n    /* } */\n\n    return owner;\n}\n\nGList *\nseafile_get_orphan_repo_list(GError **error)\n{\n    GList *ret = NULL;\n    GList *repos, *ptr;\n\n    repos = seaf_repo_manager_get_orphan_repo_list(seaf->repo_mgr);\n    ret = convert_repo_list (repos);\n\n    for (ptr = repos; ptr; ptr = ptr->next) {\n        seaf_repo_unref ((SeafRepo *)ptr->data);\n    }\n    g_list_free (repos);\n\n    return ret;\n}\n\nGList *\nseafile_list_owned_repos (const char *email, int ret_corrupted,\n            
              int start, int limit, GError **error)\n{\n    GList *ret = NULL;\n    GList *repos, *ptr;\n\n    repos = seaf_repo_manager_get_repos_by_owner (seaf->repo_mgr, email, ret_corrupted,\n                                                  start, limit, NULL);\n    ret = convert_repo_list (repos);\n\n    /* for (ptr = ret; ptr; ptr = ptr->next) { */\n    /*     g_object_get (ptr->data, \"repo_id\", &repo_id, NULL); */\n    /*     is_shared = seaf_share_manager_is_repo_shared (seaf->share_mgr, repo_id); */\n    /*     if (is_shared < 0) { */\n    /*         g_free (repo_id); */\n    /*         break; */\n    /*     } else { */\n    /*         g_object_set (ptr->data, \"is_shared\", is_shared, NULL); */\n    /*         g_free (repo_id); */\n    /*     } */\n    /* } */\n\n    /* while (ptr) { */\n    /*     g_object_set (ptr->data, \"is_shared\", FALSE, NULL); */\n    /*     ptr = ptr->prev; */\n    /* } */\n\n    for(ptr = repos; ptr; ptr = ptr->next) {\n        seaf_repo_unref ((SeafRepo *)ptr->data);\n    }\n    g_list_free (repos);\n\n    return ret;\n}\n\nGList *\nseafile_search_repos_by_name (const char *name, GError **error)\n{\n    GList *ret = NULL;\n    GList *repos, *ptr;\n\n    repos = seaf_repo_manager_search_repos_by_name (seaf->repo_mgr, name);\n    ret = convert_repo_list (repos);\n\n    for (ptr = repos; ptr; ptr = ptr->next) {\n        seaf_repo_unref ((SeafRepo *)ptr->data);\n    }\n    g_list_free (repos);\n\n    return g_list_reverse(ret);\n}\n\ngint64\nseafile_get_user_quota_usage (const char *email, GError **error)\n{\n    gint64 ret;\n\n    if (!email) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Bad user id\");\n        return -1;\n    }\n\n    ret = seaf_quota_manager_get_user_usage (seaf->quota_mgr, email);\n    if (ret < 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, \"Internal server error\");\n        return -1;\n    }\n\n    return ret;\n}\n\ngint64\nseafile_get_user_share_usage (const 
char *email, GError **error)\n{\n    gint64 ret;\n\n    if (!email) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Bad user id\");\n        return -1;\n    }\n\n    ret = seaf_quota_manager_get_user_share_usage (seaf->quota_mgr, email);\n    if (ret < 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, \"Internal server error\");\n        return -1;\n    }\n\n    return ret;\n}\n\ngint64\nseafile_server_repo_size(const char *repo_id, GError **error)\n{\n    gint64 ret;\n\n    if (!repo_id || strlen(repo_id) != 36) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Bad repo id\");\n        return -1;\n    }\n\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return -1;\n    }\n\n    ret = seaf_repo_manager_get_repo_size (seaf->repo_mgr, repo_id);\n    if (ret < 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, \"Internal server error\");\n        return -1;\n    }\n\n    return ret;\n}\n\nint\nseafile_set_repo_history_limit (const char *repo_id,\n                                int days,\n                                GError **error)\n{\n    if (!repo_id || !is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return -1;\n    }\n\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return -1;\n    }\n\n    if (seaf_repo_manager_set_repo_history_limit (seaf->repo_mgr,\n                                                  repo_id,\n                                                  days) < 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_INTERNAL, \"DB Error\");\n        return -1;\n    }\n\n    return 0;\n}\n\nint\nseafile_get_repo_history_limit (const char *repo_id,\n                                GError **error)\n{\n    if (!repo_id || !is_uuid_valid 
(repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return -1;\n    }\n\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return -1;\n    }\n\n    return  seaf_repo_manager_get_repo_history_limit (seaf->repo_mgr, repo_id);\n}\n\nint\nseafile_set_repo_valid_since (const char *repo_id,\n                              gint64 timestamp,\n                              GError **error)\n{\n    return seaf_repo_manager_set_repo_valid_since (seaf->repo_mgr,\n                                                   repo_id,\n                                                   timestamp);\n}\n\nint\nseafile_repo_set_access_property (const char *repo_id, const char *ap, GError **error)\n{\n    int ret;\n\n    if (!repo_id) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Argument should not be null\");\n        return -1;\n    }\n\n    if (strlen(repo_id) != 36) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Wrong repo id\");\n        return -1;\n    }\n\n    if (g_strcmp0(ap, \"public\") != 0 && g_strcmp0(ap, \"own\") != 0 && g_strcmp0(ap, \"private\") != 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Wrong access property\");\n        return -1;\n    }\n\n    ret = seaf_repo_manager_set_access_property (seaf->repo_mgr, repo_id, ap);\n    if (ret < 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, \"Internal server error\");\n        return -1;\n    }\n\n    return ret;\n}\n\nchar *\nseafile_repo_query_access_property (const char *repo_id, GError **error)\n{\n    char *ret;\n\n    if (!repo_id) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Argument should not be null\");\n        return NULL;\n    }\n\n    if (strlen(repo_id) != 36) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Wrong repo id\");\n        return NULL;\n    
}\n\n    ret = seaf_repo_manager_query_access_property (seaf->repo_mgr, repo_id);\n\n    return ret;\n}\n\nchar *\nseafile_web_get_access_token (const char *repo_id,\n                              const char *obj_id,\n                              const char *op,\n                              const char *username,\n                              int use_onetime,\n                              GError **error)\n{\n    char *token;\n\n    if (!repo_id || !obj_id || !op || !username) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Missing args\");\n        return NULL;\n    }\n\n    token = seaf_web_at_manager_get_access_token (seaf->web_at_mgr,\n                                                  repo_id, obj_id, op,\n                                                  username, use_onetime, error);\n    return token;\n}\n\nGObject *\nseafile_web_query_access_token (const char *token, GError **error)\n{\n    SeafileWebAccess *webaccess = NULL;\n\n    if (!token) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Token should not be null\");\n        return NULL;\n    }\n\n    webaccess = seaf_web_at_manager_query_access_token (seaf->web_at_mgr,\n                                                        token);\n    if (webaccess)\n        return (GObject *)webaccess;\n\n    return NULL;\n}\n\nchar *\nseafile_query_zip_progress (const char *token, GError **error)\n{\n#ifdef HAVE_EVHTP\n    return zip_download_mgr_query_zip_progress (seaf->zip_download_mgr,\n                                                token, error);\n#else\n    return NULL;\n#endif\n}\n\nint\nseafile_cancel_zip_task (const char *token, GError **error)\n{\n#ifdef HAVE_EVHTP\n    return zip_download_mgr_cancel_zip_task (seaf->zip_download_mgr,\n                                             token);\n#else\n    return 0;\n#endif\n}\n\nint\nseafile_add_share (const char *repo_id, const char *from_email,\n                   const char *to_email, const 
char *permission, GError **error)\n{\n    int ret;\n\n    if (!repo_id || !from_email || !to_email || !permission) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Missing args\");\n        return -1;\n    }\n\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid repo_id parameter\");\n        return -1;\n    }\n\n    if (g_strcmp0 (from_email, to_email) == 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Can not share repo to myself\");\n        return -1;\n    }\n\n    if (!is_permission_valid (permission)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid permission parameter\");\n        return -1;\n    }\n\n    ret = seaf_share_manager_add_share (seaf->share_mgr, repo_id, from_email,\n                                        to_email, permission);\n\n    return ret;\n}\n\nGList *\nseafile_list_share_repos (const char *email, const char *type,\n                          int start, int limit, GError **error)\n{\n    if (g_strcmp0 (type, \"from_email\") != 0 &&\n        g_strcmp0 (type, \"to_email\") != 0 ) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Wrong type argument\");\n        return NULL;\n    }\n\n    return seaf_share_manager_list_share_repos (seaf->share_mgr,\n                                                email, type,\n                                                start, limit,\n                                                NULL);\n}\n\nGList *\nseafile_list_repo_shared_to (const char *from_user, const char *repo_id,\n                             GError **error)\n{\n\n    if (!from_user || !repo_id) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Missing args\");\n        return NULL;\n    }\n\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, 
\"Invalid repo id\");\n        return NULL;\n    }\n\n    return seaf_share_manager_list_repo_shared_to (seaf->share_mgr,\n                                                   from_user, repo_id,\n                                                   error);\n}\n\nchar *\nseafile_share_subdir_to_user (const char *repo_id,\n                              const char *path,\n                              const char *owner,\n                              const char *share_user,\n                              const char *permission,\n                              const char *passwd,\n                              GError **error)\n{\n    if (is_empty_string (repo_id) || !is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid repo_id parameter\");\n        return NULL;\n    }\n\n    if (is_empty_string (path) || strcmp (path, \"/\") == 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid path parameter\");\n        return NULL;\n    }\n\n    if (is_empty_string (owner)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid owner parameter\");\n        return NULL;\n    }\n\n    if (is_empty_string (share_user)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid share_user parameter\");\n        return NULL;\n    }\n\n    if (strcmp (owner, share_user) == 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Can't share subdir to myself\");\n        return NULL;\n    }\n\n    if (!is_permission_valid (permission)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid permission parameter\");\n        return NULL;\n    }\n\n    char *real_path;\n    char *vrepo_name;\n    char *vrepo_id;\n    char *ret = NULL;\n\n    real_path = format_dir_path (path);\n    // Use subdir name as virtual repo 
name and description\n    vrepo_name = g_path_get_basename (real_path);\n    vrepo_id = seaf_repo_manager_create_virtual_repo (seaf->repo_mgr,\n                                                      repo_id, real_path,\n                                                      vrepo_name, vrepo_name,\n                                                      owner, passwd, error);\n    if (!vrepo_id)\n        goto out;\n\n    int result = seaf_share_manager_add_share (seaf->share_mgr, vrepo_id, owner,\n                                        share_user, permission);\n    if (result < 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to share subdir to user\");\n        g_free (vrepo_id);\n    }\n    else \n        ret = vrepo_id;\n\nout:\n    g_free (vrepo_name);\n    g_free (real_path);\n    return ret;\n}\n\nint\nseafile_unshare_subdir_for_user (const char *repo_id,\n                                 const char *path,\n                                 const char *owner,\n                                 const char *share_user,\n                                 GError **error)\n{\n    if (is_empty_string (repo_id) || !is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid repo_id parameter\");\n        return -1;\n    }\n\n    if (is_empty_string (path) || strcmp (path, \"/\") == 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid path parameter\");\n        return -1;\n    }\n\n    if (is_empty_string (owner)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid owner parameter\");\n        return -1;\n    }\n\n    if (is_empty_string (share_user) ||\n        strcmp (owner, share_user) == 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid share_user parameter\");\n        return -1;\n    }\n\n    char *real_path;\n   
 int ret = 0;\n\n    real_path = format_dir_path (path);\n\n    ret = seaf_share_manager_unshare_subdir (seaf->share_mgr,\n                                             repo_id, real_path, owner, share_user);\n    if (ret < 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to unshare subdir for user\");\n    }\n\n    g_free (real_path);\n    return ret;\n}\n\nint\nseafile_update_share_subdir_perm_for_user (const char *repo_id,\n                                           const char *path,\n                                           const char *owner,\n                                           const char *share_user,\n                                           const char *permission,\n                                           GError **error)\n{\n    if (is_empty_string (repo_id) || !is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid repo_id parameter\");\n        return -1;\n    }\n\n    if (is_empty_string (path) || strcmp (path, \"/\") == 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid path parameter\");\n        return -1;\n    }\n\n    if (is_empty_string (owner)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid owner parameter\");\n        return -1;\n    }\n\n    if (is_empty_string (share_user) ||\n        strcmp (owner, share_user) == 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid share_user parameter\");\n        return -1;\n    }\n\n    if (!is_permission_valid (permission)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid permission parameter\");\n        return -1;\n    }\n\n    char *real_path;\n    int ret = 0;\n\n    real_path = format_dir_path (path);\n\n    ret = seaf_share_manager_set_subdir_perm_by_path (seaf->share_mgr,\n      
                                                repo_id, owner, share_user,\n                                                      permission, real_path);\n\n    if (ret < 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to update share subdir permission for user\");\n    }\n\n    g_free (real_path);\n    return ret;\n}\n\nGList *\nseafile_list_repo_shared_group (const char *from_user, const char *repo_id,\n                                GError **error)\n{\n\n    if (!from_user || !repo_id) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Missing args\");\n        return NULL;\n    }\n\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return NULL;\n    }\n\n    return seaf_share_manager_list_repo_shared_group (seaf->share_mgr,\n                                                      from_user, repo_id,\n                                                      error);\n}\n\nint\nseafile_remove_share (const char *repo_id, const char *from_email,\n                      const char *to_email, GError **error)\n{\n    int ret;\n\n    if (!repo_id || !from_email ||!to_email) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Missing args\");\n        return -1;\n    }\n\n    ret = seaf_share_manager_remove_share (seaf->share_mgr, repo_id, from_email,\n                                           to_email);\n\n    return ret;\n}\n\n/* Group repo RPC. 
*/\n\nint\nseafile_group_share_repo (const char *repo_id, int group_id,\n                          const char *user_name, const char *permission,\n                          GError **error)\n{\n    SeafRepoManager *mgr = seaf->repo_mgr;\n    int ret;\n\n    if (group_id <= 0 || !user_name || !repo_id || !permission) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Bad input argument\");\n        return -1;\n    }\n\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return -1;\n    }\n\n    if (!is_permission_valid (permission)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid permission parameter\");\n        return -1;\n    }\n\n    ret = seaf_repo_manager_add_group_repo (mgr, repo_id, group_id, user_name,\n                                            permission, error);\n\n    return ret;\n}\n\nint\nseafile_group_unshare_repo (const char *repo_id, int group_id,\n                            const char *user_name, GError **error)\n{\n    SeafRepoManager *mgr = seaf->repo_mgr;\n    int ret;\n\n    if (!user_name || !repo_id) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"User name and repo id can not be NULL\");\n        return -1;\n    }\n\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return -1;\n    }\n\n    ret = seaf_repo_manager_del_group_repo (mgr, repo_id, group_id, error);\n\n    return ret;\n\n}\n\nchar *\nseafile_share_subdir_to_group (const char *repo_id,\n                               const char *path,\n                               const char *owner,\n                               int share_group,\n                               const char *permission,\n                               const char *passwd,\n                               GError **error)\n{\n    
if (is_empty_string (repo_id) || !is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid repo_id parameter\");\n        return NULL;\n    }\n\n    if (is_empty_string (path) || strcmp (path, \"/\") == 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid path parameter\");\n        return NULL;\n    }\n\n    if (is_empty_string (owner)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid owner parameter\");\n        return NULL;\n    }\n\n    if (share_group < 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid share_group parameter\");\n        return NULL;\n    }\n\n    if (!is_permission_valid (permission)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid permission parameter\");\n        return NULL;\n    }\n\n    char *real_path;\n    char *vrepo_name;\n    char *vrepo_id;\n    char* ret = NULL;\n\n    real_path = format_dir_path (path);\n    // Use subdir name as virtual repo name and description\n    vrepo_name = g_path_get_basename (real_path);\n    vrepo_id = seaf_repo_manager_create_virtual_repo (seaf->repo_mgr,\n                                                      repo_id, real_path,\n                                                      vrepo_name, vrepo_name,\n                                                      owner, passwd, error);\n    if (!vrepo_id)\n        goto out;\n\n    int result = seaf_repo_manager_add_group_repo (seaf->repo_mgr, vrepo_id, share_group,\n                                            owner, permission, error);\n    if (result < 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to share subdir to group\");\n        g_free (vrepo_id);\n    }\n    else\n        ret = vrepo_id;\n\nout:\n    g_free (vrepo_name);\n    g_free 
(real_path);\n    return ret;\n}\n\nint\nseafile_unshare_subdir_for_group (const char *repo_id,\n                                  const char *path,\n                                  const char *owner,\n                                  int share_group,\n                                  GError **error)\n{\n    if (is_empty_string (repo_id) || !is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid repo_id parameter\");\n        return -1;\n    }\n\n    if (is_empty_string (path) || strcmp (path, \"/\") == 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid path parameter\");\n        return -1;\n    }\n\n    if (is_empty_string (owner)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid owner parameter\");\n        return -1;\n    }\n\n    if (share_group < 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid share_group parameter\");\n        return -1;\n    }\n\n    char *real_path;\n    int ret = 0;\n\n    real_path = format_dir_path (path);\n\n    ret = seaf_share_manager_unshare_group_subdir (seaf->share_mgr, repo_id,\n                                                   real_path, owner, share_group);\n    if (ret < 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to unshare subdir for group\");\n    }\n\n    g_free (real_path);\n    return ret;\n}\n\nint\nseafile_update_share_subdir_perm_for_group (const char *repo_id,\n                                            const char *path,\n                                            const char *owner,\n                                            int share_group,\n                                            const char *permission,\n                                            GError **error)\n{\n    if (is_empty_string (repo_id) || !is_uuid_valid (repo_id)) {\n 
       g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid repo_id parameter\");\n        return -1;\n    }\n\n    if (is_empty_string (path) || strcmp (path, \"/\") == 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid path parameter\");\n        return -1;\n    }\n\n    if (is_empty_string (owner)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid owner parameter\");\n        return -1;\n    }\n\n    if (share_group < 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid share_group parameter\");\n        return -1;\n    }\n\n    if (!is_permission_valid (permission)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid permission parameter\");\n        return -1;\n    }\n\n    char *real_path;\n    int ret = 0;\n\n    real_path = format_dir_path (path);\n    ret = seaf_repo_manager_set_subdir_group_perm_by_path (seaf->repo_mgr,\n                                                           repo_id, owner, share_group,\n                                                           permission, real_path);\n    if (ret < 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to update share subdir permission for group\");\n    }\n\n    g_free (real_path);\n    return ret;\n}\n\nchar *\nseafile_get_shared_groups_by_repo(const char *repo_id, GError **error)\n{\n    SeafRepoManager *mgr = seaf->repo_mgr;\n    GList *group_ids = NULL, *ptr;\n    GString *result;\n\n    if (!repo_id) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Bad arguments\");\n        return NULL;\n    }\n\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return NULL;\n    }\n\n    group_ids = 
seaf_repo_manager_get_groups_by_repo (mgr, repo_id, error);\n    if (!group_ids) {\n        return NULL;\n    }\n\n    result = g_string_new(\"\");\n    ptr = group_ids;\n    while (ptr) {\n        g_string_append_printf (result, \"%d\\n\", (int)(long)ptr->data);\n        ptr = ptr->next;\n    }\n    g_list_free (group_ids);\n\n    return g_string_free (result, FALSE);\n}\n\nchar *\nseafile_get_group_repoids (int group_id, GError **error)\n{\n    SeafRepoManager *mgr = seaf->repo_mgr;\n    GList *repo_ids = NULL, *ptr;\n    GString *result;\n\n    repo_ids = seaf_repo_manager_get_group_repoids (mgr, group_id, error);\n    if (!repo_ids) {\n        return NULL;\n    }\n\n    result = g_string_new(\"\");\n    ptr = repo_ids;\n    while (ptr) {\n        g_string_append_printf (result, \"%s\\n\", (char *)ptr->data);\n        g_free (ptr->data);\n        ptr = ptr->next;\n    }\n    g_list_free (repo_ids);\n\n    return g_string_free (result, FALSE);\n}\n\nGList *\nseafile_get_repos_by_group (int group_id, GError **error)\n{\n    SeafRepoManager *mgr = seaf->repo_mgr;\n    GList *ret = NULL;\n\n    if (group_id < 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid group id.\");\n        return NULL;\n    }\n\n    ret = seaf_repo_manager_get_repos_by_group (mgr, group_id, error);\n\n    return ret;\n}\n\nGList *\nseafile_get_group_repos_by_owner (char *user, GError **error)\n{\n    SeafRepoManager *mgr = seaf->repo_mgr;\n    GList *ret = NULL;\n\n    if (!user) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"user name can not be NULL\");\n        return NULL;\n    }\n\n    ret = seaf_repo_manager_get_group_repos_by_owner (mgr, user, error);\n    if (!ret) {\n        return NULL;\n    }\n\n    return g_list_reverse (ret);\n}\n\nchar *\nseafile_get_group_repo_owner (const char *repo_id, GError **error)\n{\n    SeafRepoManager *mgr = seaf->repo_mgr;\n    GString *result = g_string_new 
(\"\");\n\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return NULL;\n    }\n\n    char *share_from = seaf_repo_manager_get_group_repo_owner (mgr, repo_id,\n                                                               error);\n    if (share_from) {\n        g_string_append_printf (result, \"%s\", share_from);\n        g_free (share_from);\n    }\n\n    return g_string_free (result, FALSE);\n}\n\nint\nseafile_remove_repo_group(int group_id, const char *username, GError **error)\n{\n    if (group_id <= 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Wrong group id argument\");\n        return -1;\n    }\n\n    return seaf_repo_manager_remove_group_repos (seaf->repo_mgr,\n                                                 group_id, username,\n                                                 error);\n}\n\n/* Inner public repo RPC */\n\nint\nseafile_set_inner_pub_repo (const char *repo_id,\n                            const char *permission,\n                            GError **error)\n{\n    if (!repo_id) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Bad args\");\n        return -1;\n    }\n\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return -1;\n    }\n\n    if (seaf_repo_manager_set_inner_pub_repo (seaf->repo_mgr,\n                                              repo_id, permission) < 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, \"Internal error\");\n        return -1;\n    }\n\n    return 0;\n}\n\nint\nseafile_unset_inner_pub_repo (const char *repo_id, GError **error)\n{\n    if (!repo_id) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Bad args\");\n        return -1;\n    }\n\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid 
repo id\");\n        return -1;\n    }\n\n    if (seaf_repo_manager_unset_inner_pub_repo (seaf->repo_mgr, repo_id) < 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, \"Internal error\");\n        return -1;\n    }\n\n    return 0;\n}\n\nGList *\nseafile_list_inner_pub_repos (GError **error)\n{\n    return seaf_repo_manager_list_inner_pub_repos (seaf->repo_mgr, NULL);\n}\n\ngint64\nseafile_count_inner_pub_repos (GError **error)\n{\n    return seaf_repo_manager_count_inner_pub_repos (seaf->repo_mgr);\n}\n\nGList *\nseafile_list_inner_pub_repos_by_owner (const char *user, GError **error)\n{\n    if (!user) {\n        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, \"Bad arguments\");\n        return NULL;\n    }\n\n    return seaf_repo_manager_list_inner_pub_repos_by_owner (seaf->repo_mgr, user);\n}\n\nint\nseafile_is_inner_pub_repo (const char *repo_id, GError **error)\n{\n    if (!repo_id) {\n        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, \"Bad arguments\");\n        return -1;\n    }\n\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return -1;\n    }\n\n    return seaf_repo_manager_is_inner_pub_repo (seaf->repo_mgr, repo_id);\n}\n\ngint64\nseafile_get_file_size (const char *store_id, int version,\n                       const char *file_id, GError **error)\n{\n    gint64 file_size;\n\n    if (!store_id || !is_uuid_valid(store_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid store id\");\n        return -1;\n    }\n\n    if (!file_id || !is_object_id_valid (file_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid file id\");\n        return -1;\n    }\n\n    file_size = seaf_fs_manager_get_file_size (seaf->fs_mgr, store_id, version, file_id);\n    if (file_size < 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_INTERNAL,\n                     \"failed to 
read file size\");\n        return -1;\n    }\n\n    return file_size;\n}\n\ngint64\nseafile_get_dir_size (const char *store_id, int version,\n                      const char *dir_id, GError **error)\n{\n    gint64 dir_size;\n\n    if (!store_id || !is_uuid_valid (store_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid store id\");\n        return -1;\n    }\n\n    if (!dir_id || !is_object_id_valid (dir_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid dir id\");\n        return -1;\n    }\n\n    dir_size = seaf_fs_manager_get_fs_size (seaf->fs_mgr, store_id, version, dir_id);\n    if (dir_size < 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Failed to caculate dir size\");\n        return -1;\n    }\n\n    return dir_size;\n}\n\nint\nseafile_check_passwd (const char *repo_id,\n                      const char *magic,\n                      GError **error)\n{\n    if (!repo_id || strlen(repo_id) != 36 || !magic) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Bad arguments\");\n        return -1;\n    }\n\n    if (seaf_passwd_manager_check_passwd (seaf->passwd_mgr,\n                                          repo_id, magic,\n                                          error) < 0) {\n        return -1;\n    }\n\n    return 0;\n}\n\nint\nseafile_set_passwd (const char *repo_id,\n                    const char *user,\n                    const char *passwd,\n                    GError **error)\n{\n    if (!repo_id || strlen(repo_id) != 36 || !user || !passwd) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Bad arguments\");\n        return -1;\n    }\n\n    if (seaf_passwd_manager_set_passwd (seaf->passwd_mgr,\n                                        repo_id, user, passwd,\n                                        error) < 0) {\n        
return -1;\n    }\n\n    return 0;\n}\n\nint\nseafile_unset_passwd (const char *repo_id,\n                      const char *user,\n                      GError **error)\n{\n    if (!repo_id || strlen(repo_id) != 36 || !user) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Bad arguments\");\n        return -1;\n    }\n\n    if (seaf_passwd_manager_unset_passwd (seaf->passwd_mgr,\n                                          repo_id, user,\n                                          error) < 0) {\n        return -1;\n    }\n\n    return 0;\n}\n\nint\nseafile_is_passwd_set (const char *repo_id, const char *user, GError **error)\n{\n    if (!repo_id || strlen(repo_id) != 36 || !user) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Bad arguments\");\n        return -1;\n    }\n\n    return seaf_passwd_manager_is_passwd_set (seaf->passwd_mgr,\n                                              repo_id, user);\n}\n\nGObject *\nseafile_get_decrypt_key (const char *repo_id, const char *user, GError **error)\n{\n    SeafileCryptKey *ret;\n\n    if (!repo_id || strlen(repo_id) != 36 || !user) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Bad arguments\");\n        return NULL;\n    }\n\n    ret = seaf_passwd_manager_get_decrypt_key (seaf->passwd_mgr,\n                                               repo_id, user);\n    if (!ret) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Password was not set\");\n        return NULL;\n    }\n\n    return (GObject *)ret;\n}\n\nint\nseafile_revert_on_server (const char *repo_id,\n                          const char *commit_id,\n                          const char *user_name,\n                          GError **error)\n{\n    if (!repo_id || strlen(repo_id) != 36 ||\n        !commit_id || strlen(commit_id) != 40 ||\n        !user_name) {\n        g_set_error (error, SEAFILE_DOMAIN, 
SEAF_ERR_BAD_ARGS,\n                     \"Bad arguments\");\n        return -1;\n    }\n\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return -1;\n    }\n\n    if (!is_object_id_valid (commit_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid commit id\");\n        return -1;\n    }\n\n    return seaf_repo_manager_revert_on_server (seaf->repo_mgr,\n                                               repo_id,\n                                               commit_id,\n                                               user_name,\n                                               error);\n}\n\nint\nseafile_post_file (const char *repo_id, const char *temp_file_path,\n                   const char *parent_dir, const char *file_name,\n                   const char *user,\n                   GError **error)\n{\n    char *norm_parent_dir = NULL, *norm_file_name = NULL, *rpath = NULL;\n    int ret = 0;\n\n    if (!repo_id || !temp_file_path || !parent_dir || !file_name || !user) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Argument should not be null\");\n        return -1;\n    }\n\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return -1;\n    }\n\n    norm_parent_dir = normalize_utf8_path (parent_dir);\n    if (!norm_parent_dir) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Path is in valid UTF8 encoding\");\n        ret = -1;\n        goto out;\n    }\n\n    norm_file_name = normalize_utf8_path (file_name);\n    if (!norm_file_name) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Path is in valid UTF8 encoding\");\n        ret = -1;\n        goto out;\n    }\n\n    rpath = format_dir_path (norm_parent_dir);\n\n    if (seaf_repo_manager_post_file 
(seaf->repo_mgr, repo_id,\n                                     temp_file_path, rpath,\n                                     norm_file_name, user,\n                                     error) < 0) {\n        ret = -1;\n    }\n\nout:\n    g_free (norm_parent_dir);\n    g_free (norm_file_name);\n    g_free (rpath);\n\n    return ret;\n}\n\n/* char * */\n/* seafile_post_file_blocks (const char *repo_id, */\n/*                           const char *parent_dir, */\n/*                           const char *file_name, */\n/*                           const char *blockids_json, */\n/*                           const char *paths_json, */\n/*                           const char *user, */\n/*                           gint64 file_size, */\n/*                           int replace_existed, */\n/*                           GError **error) */\n/* { */\n/*     char *norm_parent_dir = NULL, *norm_file_name = NULL, *rpath = NULL; */\n/*     char *new_id = NULL; */\n\n/*     if (!repo_id || !parent_dir || !file_name */\n/*         || !blockids_json || ! 
paths_json || !user || file_size < 0) { */\n/*         g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, */\n/*                      \"Argument should not be null\"); */\n/*         return NULL; */\n/*     } */\n\n/*     if (!is_uuid_valid (repo_id)) { */\n/*         g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\"); */\n/*         return NULL; */\n/*     } */\n\n/*     norm_parent_dir = normalize_utf8_path (parent_dir); */\n/*     if (!norm_parent_dir) { */\n/*         g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, */\n/*                      \"Path is in valid UTF8 encoding\"); */\n/*         goto out; */\n/*     } */\n\n/*     norm_file_name = normalize_utf8_path (file_name); */\n/*     if (!norm_file_name) { */\n/*         g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, */\n/*                      \"Path is in valid UTF8 encoding\"); */\n/*         goto out; */\n/*     } */\n\n/*     rpath = format_dir_path (norm_parent_dir); */\n\n/*     seaf_repo_manager_post_file_blocks (seaf->repo_mgr, */\n/*                                         repo_id, */\n/*                                         rpath, */\n/*                                         norm_file_name, */\n/*                                         blockids_json, */\n/*                                         paths_json, */\n/*                                         user, */\n/*                                         file_size, */\n/*                                         replace_existed, */\n/*                                         &new_id, */\n/*                                         error); */\n\n/* out: */\n/*     g_free (norm_parent_dir); */\n/*     g_free (norm_file_name); */\n/*     g_free (rpath); */\n\n/*     return new_id; */\n/* } */\n\nchar *\nseafile_post_multi_files (const char *repo_id,\n                          const char *parent_dir,\n                          const char *filenames_json,\n                          const char 
*paths_json,\n                          const char *user,\n                          int replace_existed,\n                          GError **error)\n{\n    char *norm_parent_dir = NULL, *rpath = NULL;\n    char *ret_json = NULL;\n\n    if (!repo_id || !filenames_json || !parent_dir || !paths_json || !user) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Argument should not be null\");\n        return NULL;\n    }\n\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return NULL;\n    }\n\n    norm_parent_dir = normalize_utf8_path (parent_dir);\n    if (!norm_parent_dir) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Path is in valid UTF8 encoding\");\n        goto out;\n    }\n\n    rpath = format_dir_path (norm_parent_dir);\n\n    seaf_repo_manager_post_multi_files (seaf->repo_mgr,\n                                        repo_id,\n                                        rpath,\n                                        filenames_json,\n                                        paths_json,\n                                        user,\n                                        replace_existed,\n                                        0,\n                                        &ret_json,\n                                        NULL,\n                                        error);\n\nout:\n    g_free (norm_parent_dir);\n    g_free (rpath);\n\n    return ret_json;\n}\n\nchar *\nseafile_put_file (const char *repo_id, const char *temp_file_path,\n                  const char *parent_dir, const char *file_name,\n                  const char *user, const char *head_id,\n                  GError **error)\n{\n    char *norm_parent_dir = NULL, *norm_file_name = NULL, *rpath = NULL;\n    char *new_file_id = NULL;\n\n    if (!repo_id || !temp_file_path || !parent_dir || !file_name || !user) {\n        g_set_error 
(error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Argument should not be null\");\n        return NULL;\n    }\n\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return NULL;\n    }\n\n    norm_parent_dir = normalize_utf8_path (parent_dir);\n    if (!norm_parent_dir) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Path is in valid UTF8 encoding\");\n        goto out;\n    }\n\n    norm_file_name = normalize_utf8_path (file_name);\n    if (!norm_file_name) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Path is in valid UTF8 encoding\");\n        goto out;\n    }\n\n    rpath = format_dir_path (norm_parent_dir);\n\n    seaf_repo_manager_put_file (seaf->repo_mgr, repo_id,\n                                temp_file_path, rpath,\n                                norm_file_name, user, head_id,\n                                0,\n                                &new_file_id, error);\n\nout:\n    g_free (norm_parent_dir);\n    g_free (norm_file_name);\n    g_free (rpath);\n\n    return new_file_id;\n}\n\n/* char * */\n/* seafile_put_file_blocks (const char *repo_id, const char *parent_dir, */\n/*                          const char *file_name, const char *blockids_json, */\n/*                          const char *paths_json, const char *user, */\n/*                          const char *head_id, gint64 file_size, GError **error) */\n/* { */\n/*     char *norm_parent_dir = NULL, *norm_file_name = NULL, *rpath = NULL; */\n/*     char *new_file_id = NULL; */\n\n/*     if (!repo_id || !parent_dir || !file_name */\n/*         || !blockids_json || ! 
paths_json || !user) { */\n/*         g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, */\n/*                      \"Argument should not be null\"); */\n/*         return NULL; */\n/*     } */\n\n/*     if (!is_uuid_valid (repo_id)) { */\n/*         g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\"); */\n/*         return NULL; */\n/*     } */\n\n/*     norm_parent_dir = normalize_utf8_path (parent_dir); */\n/*     if (!norm_parent_dir) { */\n/*         g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, */\n/*                      \"Path is in valid UTF8 encoding\"); */\n/*         goto out; */\n/*     } */\n\n/*     norm_file_name = normalize_utf8_path (file_name); */\n/*     if (!norm_file_name) { */\n/*         g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, */\n/*                      \"Path is in valid UTF8 encoding\"); */\n/*         goto out; */\n/*     } */\n\n/*     rpath = format_dir_path (norm_parent_dir); */\n\n/*     seaf_repo_manager_put_file_blocks (seaf->repo_mgr, repo_id, */\n/*                                        rpath, norm_file_name, */\n/*                                        blockids_json, paths_json, */\n/*                                        user, head_id, file_size, */\n/*                                        &new_file_id, error); */\n\n/* out: */\n/*     g_free (norm_parent_dir); */\n/*     g_free (norm_file_name); */\n/*     g_free (rpath); */\n\n/*     return new_file_id; */\n/* } */\n\nint\nseafile_post_dir (const char *repo_id, const char *parent_dir,\n                  const char *new_dir_name, const char *user,\n                  GError **error)\n{\n    char *norm_parent_dir = NULL, *norm_dir_name = NULL, *rpath = NULL;\n    int ret = 0;\n\n    if (!repo_id || !parent_dir || !new_dir_name || !user) {\n        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, \"Argument should not be null\");\n        return -1;\n    }\n\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, 
SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return -1;\n    }\n\n    norm_parent_dir = normalize_utf8_path (parent_dir);\n    if (!norm_parent_dir) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Path is in valid UTF8 encoding\");\n        ret = -1;\n        goto out;\n    }\n\n    norm_dir_name = normalize_utf8_path (new_dir_name);\n    if (!norm_dir_name) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Path is in valid UTF8 encoding\");\n        ret = -1;\n        goto out;\n    }\n\n    rpath = format_dir_path (norm_parent_dir);\n\n    if (seaf_repo_manager_post_dir (seaf->repo_mgr, repo_id,\n                                    rpath, norm_dir_name,\n                                    user, error) < 0) {\n        ret = -1;\n    }\n\nout:\n    g_free (norm_parent_dir);\n    g_free (norm_dir_name);\n    g_free (rpath);\n\n    return ret;\n}\n\nint\nseafile_post_empty_file (const char *repo_id, const char *parent_dir,\n                         const char *new_file_name, const char *user,\n                         GError **error)\n{\n    char *norm_parent_dir = NULL, *norm_file_name = NULL, *rpath = NULL;\n    int ret = 0;\n\n    if (!repo_id || !parent_dir || !new_file_name || !user) {\n        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, \"Argument should not be null\");\n        return -1;\n    }\n\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return -1;\n    }\n\n    norm_parent_dir = normalize_utf8_path (parent_dir);\n    if (!norm_parent_dir) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Path is in valid UTF8 encoding\");\n        ret = -1;\n        goto out;\n    }\n\n    norm_file_name = normalize_utf8_path (new_file_name);\n    if (!norm_file_name) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n              
       \"Path is in valid UTF8 encoding\");\n        ret = -1;\n        goto out;\n    }\n\n    rpath = format_dir_path (norm_parent_dir);\n\n    if (seaf_repo_manager_post_empty_file (seaf->repo_mgr, repo_id,\n                                           rpath, norm_file_name,\n                                           user, error) < 0) {\n        ret = -1;\n    }\n\nout:\n    g_free (norm_parent_dir);\n    g_free (norm_file_name);\n    g_free (rpath);\n\n    return ret;\n}\n\nint\nseafile_del_file (const char *repo_id, const char *parent_dir,\n                  const char *file_name, const char *user,\n                  GError **error)\n{\n    char *norm_parent_dir = NULL, *norm_file_name = NULL, *rpath = NULL;\n    int ret = 0;\n\n    if (!repo_id || !parent_dir || !file_name || !user) {\n        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, \"Argument should not be null\");\n        return -1;\n    }\n\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return -1;\n    }\n\n    norm_parent_dir = normalize_utf8_path (parent_dir);\n    if (!norm_parent_dir) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Path is in valid UTF8 encoding\");\n        ret = -1;\n        goto out;\n    }\n\n    norm_file_name = normalize_utf8_path (file_name);\n    if (!norm_file_name) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Path is in valid UTF8 encoding\");\n        ret = -1;\n        goto out;\n    }\n\n    rpath = format_dir_path (norm_parent_dir);\n\n    if (seaf_repo_manager_del_file (seaf->repo_mgr, repo_id,\n                                    rpath, norm_file_name,\n                                    user, error) < 0) {\n        ret = -1;\n    }\n\nout:\n    g_free (norm_parent_dir);\n    g_free (norm_file_name);\n    g_free (rpath);\n\n    return ret;\n}\n\nint\nseafile_batch_del_files (const char *repo_id,\n  
                       const char *filepaths,\n                         const char *user,\n                         GError **error)\n{\n    char *norm_file_list = NULL, *rpath = NULL;\n    int ret = 0;\n\n    if (!repo_id || !filepaths || !user) {\n        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, \"Argument should not be null\");\n        return -1;\n    }\n\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return -1;\n    }\n\n\n    norm_file_list = normalize_utf8_path (filepaths);\n    if (!norm_file_list) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Path is in valid UTF8 encoding\");\n        ret = -1;\n        goto out;\n    }\n\n    if (seaf_repo_manager_batch_del_files (seaf->repo_mgr, repo_id,\n                                           norm_file_list,\n                                           user, error) < 0) {\n        ret = -1;\n    }\n\nout:\n    g_free (norm_file_list);\n\n    return ret;\n}\n\nGObject *\nseafile_copy_file (const char *src_repo_id,\n                   const char *src_dir,\n                   const char *src_filename,\n                   const char *dst_repo_id,\n                   const char *dst_dir,\n                   const char *dst_filename,\n                   const char *user,\n                   int need_progress,\n                   int synchronous,\n                   GError **error)\n{\n    char *norm_src_dir = NULL, *norm_src_filename = NULL;\n    char *norm_dst_dir = NULL, *norm_dst_filename = NULL;\n    char *rsrc_dir = NULL, *rdst_dir = NULL;\n    GObject *ret = NULL;\n\n    if (!src_repo_id || !src_dir || !src_filename ||\n        !dst_repo_id || !dst_dir || !dst_filename || !user) {\n        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, \"Argument should not be null\");\n        return NULL;\n    }\n\n    if (!is_uuid_valid (src_repo_id) || !is_uuid_valid(dst_repo_id)) {\n        g_set_error 
(error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return NULL;\n    }\n\n    norm_src_dir = normalize_utf8_path (src_dir);\n    if (!norm_src_dir) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Path is in valid UTF8 encoding\");\n        goto out;\n    }\n\n    norm_src_filename = normalize_utf8_path (src_filename);\n    if (!norm_src_filename) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Path is in valid UTF8 encoding\");\n        goto out;\n    }\n\n    norm_dst_dir = normalize_utf8_path (dst_dir);\n    if (!norm_dst_dir) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Path is in valid UTF8 encoding\");\n        goto out;\n    }\n\n    norm_dst_filename = normalize_utf8_path (dst_filename);\n    if (!norm_dst_filename) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Path is in valid UTF8 encoding\");\n        goto out;\n    }\n\n    rsrc_dir = format_dir_path (norm_src_dir);\n    rdst_dir = format_dir_path (norm_dst_dir);\n\n    ret = (GObject *)seaf_repo_manager_copy_multiple_files (seaf->repo_mgr,\n                                                            src_repo_id, rsrc_dir, norm_src_filename,\n                                                            dst_repo_id, rdst_dir, norm_dst_filename,\n                                                            user, need_progress, synchronous,\n                                                            error);\n\nout:\n    g_free (norm_src_dir);\n    g_free (norm_src_filename);\n    g_free (norm_dst_dir);\n    g_free (norm_dst_filename);\n    g_free (rsrc_dir);\n    g_free (rdst_dir);\n\n    return ret;\n}\n\nGObject *\nseafile_move_file (const char *src_repo_id,\n                   const char *src_dir,\n                   const char *src_filename,\n                   const char *dst_repo_id,\n                   const 
char *dst_dir,\n                   const char *dst_filename,\n                   int replace,\n                   const char *user,\n                   int need_progress,\n                   int synchronous,\n                   GError **error)\n{\n    char *norm_src_dir = NULL, *norm_src_filename = NULL;\n    char *norm_dst_dir = NULL, *norm_dst_filename = NULL;\n    char *rsrc_dir = NULL, *rdst_dir = NULL;\n    GObject *ret = NULL;\n\n    if (!src_repo_id || !src_dir || !src_filename ||\n        !dst_repo_id || !dst_dir || !dst_filename || !user) {\n        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, \"Argument should not be null\");\n        return NULL;\n    }\n\n    if (!is_uuid_valid (src_repo_id) || !is_uuid_valid(dst_repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return NULL;\n    }\n\n    norm_src_dir = normalize_utf8_path (src_dir);\n    if (!norm_src_dir) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Path is in valid UTF8 encoding\");\n        goto out;\n    }\n\n    norm_src_filename = normalize_utf8_path (src_filename);\n    if (!norm_src_filename) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Path is in valid UTF8 encoding\");\n        goto out;\n    }\n\n    norm_dst_dir = normalize_utf8_path (dst_dir);\n    if (!norm_dst_dir) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Path is in valid UTF8 encoding\");\n        goto out;\n    }\n\n    norm_dst_filename = normalize_utf8_path (dst_filename);\n    if (!norm_dst_filename) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Path is in valid UTF8 encoding\");\n        goto out;\n    }\n\n    rsrc_dir = format_dir_path (norm_src_dir);\n    rdst_dir = format_dir_path (norm_dst_dir);\n\n    ret = (GObject *)seaf_repo_manager_move_multiple_files (seaf->repo_mgr,\n                     
                                       src_repo_id, rsrc_dir, norm_src_filename,\n                                                            dst_repo_id, rdst_dir, norm_dst_filename,\n                                                            replace, user, need_progress, synchronous,\n                                                            error);\n\nout:\n    g_free (norm_src_dir);\n    g_free (norm_src_filename);\n    g_free (norm_dst_dir);\n    g_free (norm_dst_filename);\n    g_free (rsrc_dir);\n    g_free (rdst_dir);\n\n    return ret;\n}\n\nGObject *\nseafile_get_copy_task (const char *task_id, GError **error)\n{\n    return (GObject *)seaf_copy_manager_get_task (seaf->copy_mgr, task_id);\n}\n\nint\nseafile_cancel_copy_task (const char *task_id, GError **error)\n{\n    return seaf_copy_manager_cancel_task (seaf->copy_mgr, task_id);\n}\n\nint\nseafile_rename_file (const char *repo_id,\n                     const char *parent_dir,\n                     const char *oldname,\n                     const char *newname,\n                     const char *user,\n                     GError **error)\n{\n    char *norm_parent_dir = NULL, *norm_oldname = NULL, *norm_newname = NULL;\n    char *rpath = NULL;\n    int ret = 0;\n\n    if (!repo_id || !parent_dir || !oldname || !newname || !user) {\n        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, \"Argument should not be null\");\n        return -1;\n    }\n\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return -1;\n    }\n\n    norm_parent_dir = normalize_utf8_path (parent_dir);\n    if (!norm_parent_dir) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Path is in valid UTF8 encoding\");\n        ret = -1;\n        goto out;\n    }\n\n    norm_oldname = normalize_utf8_path (oldname);\n    if (!norm_oldname) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                    
 \"Path is in valid UTF8 encoding\");\n        ret = -1;\n        goto out;\n    }\n\n    norm_newname = normalize_utf8_path (newname);\n    if (!norm_newname) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Path is in valid UTF8 encoding\");\n        ret = -1;\n        goto out;\n    }\n\n    rpath = format_dir_path (norm_parent_dir);\n\n    if (seaf_repo_manager_rename_file (seaf->repo_mgr, repo_id,\n                                       rpath, norm_oldname, norm_newname,\n                                       user, error) < 0) {\n        ret = -1;\n    }\n\nout:\n    g_free (norm_parent_dir);\n    g_free (norm_oldname);\n    g_free (norm_newname);\n    g_free (rpath);\n    return ret;\n}\n\nint\nseafile_is_valid_filename (const char *repo_id,\n                           const char *filename,\n                           GError **error)\n{\n    if (!repo_id || !filename) {\n        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, \"Argument should not be null\");\n        return -1;\n    }\n\n    int ret = seaf_repo_manager_is_valid_filename (seaf->repo_mgr,\n                                                   repo_id,\n                                                   filename,\n                                                   error);\n    return ret;\n}\n\nchar *\nseafile_create_repo (const char *repo_name,\n                     const char *repo_desc,\n                     const char *owner_email,\n                     const char *passwd,\n                     int enc_version,\n                     const char *pwd_hash_algo,\n                     const char *pwd_hash_params,\n                     GError **error)\n{\n    if (!repo_name || !repo_desc || !owner_email) {\n        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, \"Argument should not be null\");\n        return NULL;\n    }\n\n    char *repo_id;\n\n    repo_id = seaf_repo_manager_create_new_repo (seaf->repo_mgr,\n                                                 
repo_name, repo_desc,\n                                                 owner_email,\n                                                 passwd,\n                                                 enc_version,\n                                                 pwd_hash_algo,\n                                                 pwd_hash_params,\n                                                 error);\n    return repo_id;\n}\n\nchar *\nseafile_create_enc_repo (const char *repo_id,\n                         const char *repo_name,\n                         const char *repo_desc,\n                         const char *owner_email,\n                         const char *magic,\n                         const char *random_key,\n                         const char *salt,\n                         int enc_version,\n                         const char *pwd_hash,\n                         const char *pwd_hash_algo,\n                         const char *pwd_hash_params,\n                         GError **error)\n{\n    if (!repo_id || !repo_name || !repo_desc || !owner_email) {\n        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, \"Argument should not be null\");\n        return NULL;\n    }\n\n    char *ret;\n\n    ret = seaf_repo_manager_create_enc_repo (seaf->repo_mgr,\n                                             repo_id, repo_name, repo_desc,\n                                             owner_email,\n                                             magic, random_key, salt,\n                                             enc_version,\n                                             pwd_hash, pwd_hash_algo, pwd_hash_params,\n                                             error);\n    return ret;\n}\n\nint\nseafile_set_user_quota (const char *user, gint64 quota, GError **error)\n{\n    if (!user) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Bad arguments\");\n        return -1;\n    }\n\n    return seaf_quota_manager_set_user_quota (seaf->quota_mgr, 
user, quota);
}

/* Return the quota value stored for @user; thin wrapper around
 * seaf_quota_manager_get_user_quota().  -1 with *error set on NULL user. */
gint64
seafile_get_user_quota (const char *user, GError **error)
{
    if (!user) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Bad arguments");
        return -1;
    }

    return seaf_quota_manager_get_user_quota (seaf->quota_mgr, user);
}

/* Check whether adding @delta bytes to @repo_id would exceed quota.
 * The manager's rc of 1 (over quota — presumably; confirm in
 * seaf_quota_manager_check_quota_with_delta) is collapsed to -1 so RPC
 * callers only see success (0) or failure (-1/other rc). */
int
seafile_check_quota (const char *repo_id, gint64 delta, GError **error)
{
    int rc;

    if (!repo_id) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Bad arguments");
        return -1;
    }

    rc = seaf_quota_manager_check_quota_with_delta (seaf->quota_mgr, repo_id, delta);
    if (rc == 1)
        return -1;
    return rc;
}

/* List per-user quota usage; direct pass-through to the quota manager. */
GList *
seafile_list_user_quota_usage (GError **error)
{
    return seaf_repo_quota_manager_list_user_quota_usage (seaf->quota_mgr);
}

/*
 * Resolve @path in the head commit of @repo_id to an fs object id.
 * @want_dir selects which kind of object the caller expects: if the
 * resolved object's mode does not match (dir wanted but file found, or
 * vice versa), NULL is returned — note that in this mismatch case
 * *error is NOT set.  Caller frees the returned id.
 */
static char *
get_obj_id_by_path (const char *repo_id,
                    const char *path,
                    gboolean want_dir,
                    GError **error)
{
    SeafRepo *repo = NULL;
    SeafCommit *commit = NULL;
    char *obj_id = NULL;

    if (!repo_id || !path) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Bad arguments");
        return NULL;
    }

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return NULL;
    }

    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);
    if (!repo) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_INTERNAL,
                     "Get repo error");
        goto out;
    }

    /* Always resolve against the current head commit of the repo. */
    commit = seaf_commit_manager_get_commit (seaf->commit_mgr,
                                             repo->id, repo->version,
                                             repo->head->commit_id);
    if (!commit) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_INTERNAL,
                     "Get commit error");
        goto out;
    }

    guint32 mode = 0;
    obj_id = seaf_fs_manager_path_to_obj_id (seaf->fs_mgr,
                                             repo->store_id, repo->version,
                                             commit->root_id,
                                             path, &mode, error);

out:
    if (repo)
        seaf_repo_unref (repo);
    if (commit)
        seaf_commit_unref (commit);
    if (obj_id) {
        /* check if the mode matches */
        if ((want_dir && !S_ISDIR(mode)) || ((!want_dir) && S_ISDIR(mode))) {
            g_free (obj_id);
            return NULL;
        }
    }

    return obj_id;
}

/* Resolve @path (canonicalized first) to a FILE object id in the head
 * commit of @repo_id; NULL if the path is a directory or not found. */
char *seafile_get_file_id_by_path (const char *repo_id,
                                   const char *path,
                                   GError **error)
{
    if (!repo_id || !path) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Bad arguments");
        return NULL;
    }

    char *rpath = format_dir_path (path);
    char *ret = get_obj_id_by_path (repo_id, rpath, FALSE, error);

    g_free (rpath);

    filter_error (error);

    return ret;
}

/* Resolve @path (canonicalized first) to a DIRECTORY object id in the
 * head commit of @repo_id; NULL if the path is a file or not found. */
char *seafile_get_dir_id_by_path (const char *repo_id,
                                  const char *path,
                                  GError **error)
{
    if (!repo_id || !path) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Bad arguments");
        return NULL;
    }

    char *rpath = format_dir_path (path);
    char *ret = get_obj_id_by_path (repo_id, rpath, TRUE, error);

    g_free (rpath);

    filter_error (error);

    return ret;
}

/*
 * Return a SeafileDirent GObject describing the entry at @path in the
 * head commit of @repo_id.  The repo root "/" itself has no dirent and
 * is rejected as an invalid path.
 */
GObject *
seafile_get_dirent_by_path (const char *repo_id, const char *path,
                            GError **error)
{
    if (!repo_id || !path) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Bad arguments");
        return NULL;
    }

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "invalid repo id");
        return NULL;
    }

    char *rpath = format_dir_path (path);
    if (strcmp (rpath, "/") == 0) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "invalid path");
        g_free (rpath);
        return NULL;
    }

    SeafRepo *repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);
    if (!repo) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_INTERNAL,
                     "Get repo error");
        return NULL;
    }

    SeafCommit *commit = seaf_commit_manager_get_commit (seaf->commit_mgr,
                                                         repo->id, repo->version,
                                                         repo->head->commit_id);
    if (!commit) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_INTERNAL,
                     "Get commit error");
        seaf_repo_unref (repo);
        return NULL;
    }

    SeafDirent *dirent = seaf_fs_manager_get_dirent_by_path (seaf->fs_mgr,
                                                             repo->store_id, repo->version,
                                                             commit->root_id, rpath,
                                                             error);
    g_free (rpath);

    if (!dirent) {
        filter_error (error);
        seaf_repo_unref (repo);
        seaf_commit_unref (commit);
        return NULL;
    }

    GObject *obj = g_object_new (SEAFILE_TYPE_DIRENT,
                                 "obj_id", dirent->id,
                                 "obj_name", dirent->name,
                                 "mode", dirent->mode,
                                 "version", dirent->version,
                                 "mtime", dirent->mtime,
                                 "size", dirent->size,
          \"modifier\", dirent->modifier,\n                                 NULL);\n\n    seaf_repo_unref (repo);\n    seaf_commit_unref (commit);\n    seaf_dirent_free (dirent);\n\n    return obj;\n}\n\nchar *\nseafile_list_file_blocks (const char *repo_id,\n                          const char *file_id,\n                          int offset, int limit,\n                          GError **error)\n{\n    SeafRepo *repo;\n    Seafile *file;\n    GString *buf = g_string_new (\"\");\n    int index = 0;\n\n    if (!repo_id || !is_uuid_valid(repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_DIR_ID, \"Bad repo id\");\n        return NULL;\n    }\n\n    if (!file_id || !is_object_id_valid(file_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_DIR_ID, \"Bad file id\");\n        return NULL;\n    }\n\n    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);\n    if (!repo) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Bad repo id\");\n        return NULL;\n    }\n\n    file = seaf_fs_manager_get_seafile (seaf->fs_mgr,\n                                        repo->store_id,\n                                        repo->version, file_id);\n    if (!file) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_DIR_ID, \"Bad file id\");\n        seaf_repo_unref (repo);\n        return NULL;\n    }\n\n    if (offset < 0)\n        offset = 0;\n\n    for (index = 0; index < file->n_blocks; index++) {\n        if (index < offset) {\n            continue;\n        }\n\n        if (limit > 0) {\n            if (index >= offset + limit)\n                break;\n        }\n        g_string_append_printf (buf, \"%s\\n\", file->blk_sha1s[index]);\n    }\n\n    seafile_unref (file);\n    seaf_repo_unref (repo);\n    return g_string_free (buf, FALSE);\n}\n\n/*\n * Directories are always before files. 
Otherwise compare the names.\n */\nstatic gint\ncomp_dirent_func (gconstpointer a, gconstpointer b)\n{\n    const SeafDirent *dent_a = a, *dent_b = b;\n\n    if (S_ISDIR(dent_a->mode) && S_ISREG(dent_b->mode))\n        return -1;\n\n    if (S_ISREG(dent_a->mode) && S_ISDIR(dent_b->mode))\n        return 1;\n\n    return strcasecmp (dent_a->name, dent_b->name);\n}\n\nGList *\nseafile_list_dir (const char *repo_id,\n                  const char *dir_id, int offset, int limit, GError **error)\n{\n    SeafRepo *repo;\n    SeafDir *dir;\n    SeafDirent *dent;\n    SeafileDirent *d;\n    GList *res = NULL;\n    GList *p;\n\n    if (!repo_id || !is_uuid_valid(repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_DIR_ID, \"Bad repo id\");\n        return NULL;\n    }\n\n    if (!dir_id || !is_object_id_valid (dir_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_DIR_ID, \"Bad dir id\");\n        return NULL;\n    }\n\n    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);\n    if (!repo) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Bad repo id\");\n        return NULL;\n    }\n\n    dir = seaf_fs_manager_get_seafdir (seaf->fs_mgr,\n                                       repo->store_id, repo->version, dir_id);\n    if (!dir) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_DIR_ID, \"Bad dir id\");\n        seaf_repo_unref (repo);\n        return NULL;\n    }\n\n    dir->entries = g_list_sort (dir->entries, comp_dirent_func);\n\n    if (offset < 0) {\n        offset = 0;\n    }\n\n    int index = 0;\n    for (p = dir->entries; p != NULL; p = p->next, index++) {\n        if (index < offset) {\n            continue;\n        }\n\n        if (limit > 0) {\n            if (index >= offset + limit)\n                break;\n        }\n\n        dent = p->data;\n\n        if (!is_object_id_valid (dent->id))\n            continue;\n\n        d = g_object_new (SEAFILE_TYPE_DIRENT,\n                          
\"obj_id\", dent->id,\n                          \"obj_name\", dent->name,\n                          \"mode\", dent->mode,\n                          \"version\", dent->version,\n                          \"mtime\", dent->mtime,\n                          \"size\", dent->size,\n                          \"permission\", \"\",\n                          NULL);\n        res = g_list_prepend (res, d);\n    }\n\n    seaf_dir_free (dir);\n    seaf_repo_unref (repo);\n    res = g_list_reverse (res);\n    return res;\n}\n\nGList *\nseafile_list_file_revisions (const char *repo_id,\n                             const char *commit_id,\n                             const char *path,\n                             int limit,\n                             GError **error)\n{\n    if (!repo_id || !path) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Bad arguments\");\n        return NULL;\n    }\n\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return NULL;\n    }\n\n    char *rpath = format_dir_path (path);\n\n    GList *commit_list;\n    commit_list = seaf_repo_manager_list_file_revisions (seaf->repo_mgr,\n                                                         repo_id, commit_id, rpath,\n                                                         limit, FALSE, FALSE, error);\n    g_free (rpath);\n\n    return commit_list;\n}\n\nGList *\nseafile_calc_files_last_modified (const char *repo_id,\n                                  const char *parent_dir,\n                                  int limit,\n                                  GError **error)\n{\n    if (!repo_id || !parent_dir) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Bad arguments\");\n        return NULL;\n    }\n\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return 
NULL;\n    }\n\n    char *rpath = format_dir_path (parent_dir);\n\n    GList *ret = seaf_repo_manager_calc_files_last_modified (seaf->repo_mgr,\n                                                             repo_id, rpath,\n                                                             limit, error);\n    g_free (rpath);\n\n    return ret;\n}\n\nint\nseafile_revert_file (const char *repo_id,\n                     const char *commit_id,\n                     const char *path,\n                     const char *user,\n                     GError **error)\n{\n    if (!repo_id || !commit_id || !path || !user) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Bad arguments\");\n        return -1;\n    }\n\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return -1;\n    }\n\n    if (!is_object_id_valid (commit_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid commit id\");\n        return -1;\n    }\n\n    char *rpath = format_dir_path (path);\n\n    int ret = seaf_repo_manager_revert_file (seaf->repo_mgr,\n                                             repo_id, commit_id,\n                                             rpath, user, error);\n    g_free (rpath);\n\n    return ret;\n}\n\nint\nseafile_revert_dir (const char *repo_id,\n                    const char *commit_id,\n                    const char *path,\n                    const char *user,\n                    GError **error)\n{\n    if (!repo_id || !commit_id || !path || !user) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Bad arguments\");\n        return -1;\n    }\n\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return -1;\n    }\n\n    if (!is_object_id_valid (commit_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, 
SEAF_ERR_BAD_ARGS, \"Invalid commit id\");\n        return -1;\n    }\n\n    char *rpath = format_dir_path (path);\n\n    int ret = seaf_repo_manager_revert_dir (seaf->repo_mgr,\n                                            repo_id, commit_id,\n                                            rpath, user, error);\n    g_free (rpath);\n\n    return ret;\n}\n\n\nchar *\nseafile_check_repo_blocks_missing (const char *repo_id,\n                                   const char *blockids_json,\n                                   GError **error)\n{\n    json_t *array, *value, *ret_json;\n    json_error_t err;\n    size_t index;\n    char *json_data, *ret;\n    SeafRepo *repo = NULL;\n\n    array = json_loadb (blockids_json, strlen(blockids_json), 0, &err);\n    if (!array) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Bad arguments\");\n        return NULL;\n    }\n\n    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);\n    if (!repo) {\n        seaf_warning (\"Failed to get repo %.8s.\\n\", repo_id);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Repo not found\");\n        json_decref (array);\n        return NULL;\n    }\n\n    ret_json = json_array();\n    size_t n = json_array_size (array);\n    for (index = 0; index < n; index++) {\n        value = json_array_get (array, index);\n        const char *blockid = json_string_value (value);\n        if (!blockid)\n            continue;\n        if (!seaf_block_manager_block_exists(seaf->block_mgr, repo_id,\n                                             repo->version, blockid)) {\n            json_array_append_new (ret_json, json_string(blockid));\n        }\n    }\n\n    json_data = json_dumps (ret_json, 0);\n    ret = g_strdup (json_data);\n\n    free (json_data);\n    json_decref (ret_json);\n    json_decref (array);\n    seaf_repo_unref (repo);\n    return ret;\n}\n\n\nGList *\nseafile_get_deleted (const char *repo_id, int 
show_days,
                     const char *path, const char *scan_stat,
                     int limit, GError **error)
{
    if (!repo_id) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Bad arguments");
        return NULL;
    }

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return NULL;
    }

    /* @path is optional; when given it is canonicalized before use. */
    char *rpath = NULL;
    if (path)
        rpath = format_dir_path (path);

    GList *ret = seaf_repo_manager_get_deleted_entries (seaf->repo_mgr,
                                                        repo_id, show_days,
                                                        rpath, scan_stat,
                                                        limit, error);
    g_free (rpath);

    return ret;
}

/* Generate a repo access token for @email on @repo_id; thin wrapper
 * around seaf_repo_manager_generate_repo_token(). */
char *
seafile_generate_repo_token (const char *repo_id,
                             const char *email,
                             GError **error)
{
    char *token;

    if (!repo_id || !email) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Arguments should not be empty");
        return NULL;
    }

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return NULL;
    }

    token = seaf_repo_manager_generate_repo_token (seaf->repo_mgr, repo_id, email, error);

    return token;
}

/* Delete one access token of @repo_id on behalf of @user. */
int
seafile_delete_repo_token (const char *repo_id,
                           const char *token,
                           const char *user,
                           GError **error)
{
    if (!repo_id || !token || !user) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Arguments should not be empty");
        return -1;
    }

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return -1;
    }

    return seaf_repo_manager_delete_token (seaf->repo_mgr,
                                           repo_id, token, user, error);
}

/* List all access tokens issued for @repo_id. */
GList *
seafile_list_repo_tokens (const char *repo_id,
                          GError **error)
{
    GList *ret_list;

    if (!repo_id) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Arguments should not be empty");
        return NULL;
    }

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return NULL;
    }

    ret_list = seaf_repo_manager_list_repo_tokens (seaf->repo_mgr, repo_id, error);

    return ret_list;
}

/* List all access tokens held by @email across repos. */
GList *
seafile_list_repo_tokens_by_email (const char *email,
                                   GError **error)
{
    GList *ret_list;

    if (!email) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Arguments should not be empty");
        return NULL;
    }

    ret_list = seaf_repo_manager_list_repo_tokens_by_email (seaf->repo_mgr, email, error);

    return ret_list;
}

/*
 * Delete all tokens @email holds for client @peer_id, then (when built
 * with evhtp) invalidate them in the running http server so in-flight
 * sessions stop working.  @peer_id must be 40 chars of [0-9a-z].
 * NOTE(review): the validation accepts ANY lowercase letter, not just
 * hex digits [0-9a-f] — confirm whether peer ids can really contain
 * g-z, otherwise tighten the check.
 */
int
seafile_delete_repo_tokens_by_peer_id(const char *email,
                                      const char *peer_id,
                                      GError **error)
{
    if (!email || !peer_id) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Arguments should not be empty");
        return -1;
    }

    /* check the peer id */
    if (strlen(peer_id) != 40) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "invalid peer id");
        return -1;
    }
    const char *c = peer_id;
    while (*c) {
        char v = *c;
        if ((v >= '0' && v <= '9') || (v >= 'a' && v <= 'z')) {
            c++;
            continue;
        } else {
            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "invalid peer id");
            return -1;
        }
    }

    GList *tokens = NULL;
    if (seaf_repo_manager_delete_repo_tokens_by_peer_id (seaf->repo_mgr, email, peer_id, &tokens, error) < 0) {
        g_list_free_full (tokens, (GDestroyNotify)g_free);
        return -1;
    }

#ifdef HAVE_EVHTP
    seaf_http_server_invalidate_tokens(seaf->http_server, tokens);
#endif
    g_list_free_full (tokens, (GDestroyNotify)g_free);
    return 0;
}

/* Delete every token @email holds, over all repos and clients. */
int
seafile_delete_repo_tokens_by_email (const char *email,
                                     GError **error)
{
    if (!email) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Arguments should not be empty");
        return -1;
    }

    return seaf_repo_manager_delete_repo_tokens_by_email (seaf->repo_mgr, email, error);
}

/*
 * Return @user's permission string on @repo_id (repo manager decides).
 * Note: an empty (zero-length) user name returns NULL WITHOUT setting
 * *error, unlike the other failure paths here.
 */
char *
seafile_check_permission (const char *repo_id, const char *user, GError **error)
{
    if (!repo_id || !user) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Arguments should not be empty");
        return NULL;
    }

    if (!is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, "Invalid repo id");
        return NULL;
    }

    if (strlen(user) == 0)
        return NULL;

    return seaf_repo_manager_check_permission (seaf->repo_mgr,
                                               repo_id, user, error);
}

/* @path is deliberately ignored: permission is checked per-repo only,
 * by delegating to seafile_check_permission(). */
char *
seafile_check_permission_by_path (const char *repo_id, const char *path,
                                  const char *user, GError **error)
{
    return seafile_check_permission (repo_id, user, error);
}

/*
 * Like seafile_list_dir() but also fills per-entry permissions for
 * @user; @path and @dir_id identify the directory, @offset/@limit page
 * the result.  Delegates to seaf_repo_manager_list_dir_with_perm().
 */
GList *
seafile_list_dir_with_perm (const char *repo_id,
                            const char *path,
                            const char *dir_id,
                            const char *user,
                            int offset,
                            int limit,
                            GError **error)
{
    if (!repo_id || !is_uuid_valid (repo_id)) {
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, 
\"Invalid repo id\");\n        return NULL;\n    }\n\n    if (!path) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid path\");\n        return NULL;\n    }\n\n    if (!dir_id || !is_object_id_valid (dir_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid dir id\");\n        return NULL;\n    }\n\n    if (!user) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid user\");\n        return NULL;\n    }\n\n    char *rpath = format_dir_path (path);\n\n    GList *ret = seaf_repo_manager_list_dir_with_perm (seaf->repo_mgr,\n                                                       repo_id,\n                                                       rpath,\n                                                       dir_id,\n                                                       user,\n                                                       offset,\n                                                       limit,\n                                                       error);\n    g_free (rpath);\n\n    return ret;\n}\n\nint\nseafile_set_share_permission (const char *repo_id,\n                              const char *from_email,\n                              const char *to_email,\n                              const char *permission,\n                              GError **error)\n{\n    if (!repo_id || !from_email || !to_email || !permission) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Arguments should not be empty\");\n        return -1;\n    }\n\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid repo_id parameter\");\n        return -1;\n    }\n\n    if (!is_permission_valid (permission)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid permission parameter\");\n        return -1;\n    }\n\n    return seaf_share_manager_set_permission 
(seaf->share_mgr,\n                                              repo_id,\n                                              from_email,\n                                              to_email,\n                                              permission);\n}\n\nint\nseafile_set_group_repo_permission (int group_id,\n                                   const char *repo_id,\n                                   const char *permission,\n                                   GError **error)\n{\n    if (!repo_id || !permission) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Arguments should not be empty\");\n        return -1;\n    }\n\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return -1;\n    }\n\n    if (!is_permission_valid (permission)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid permission parameter\");\n        return -1;\n\n    }\n\n    return seaf_repo_manager_set_group_repo_perm (seaf->repo_mgr,\n                                                  repo_id,\n                                                  group_id,\n                                                  permission,\n                                                  error);\n}\n\nchar *\nseafile_get_file_id_by_commit_and_path(const char *repo_id,\n                                       const char *commit_id,\n                                       const char *path,\n                                       GError **error)\n{\n    SeafRepo *repo;\n    SeafCommit *commit;\n    char *file_id;\n    guint32 mode;\n\n    if (!repo_id || !is_uuid_valid(repo_id) || !commit_id || !path) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Arguments should not be empty\");\n        return NULL;\n    }\n\n    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);\n    if (!repo) {\n        g_set_error (error, 
SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Bad repo id\");\n        return NULL;\n    }\n\n    commit = seaf_commit_manager_get_commit(seaf->commit_mgr,\n                                            repo_id,\n                                            repo->version,\n                                            commit_id);\n    if (!commit) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"bad commit id\");\n        seaf_repo_unref (repo);\n        return NULL;\n    }\n\n    char *rpath = format_dir_path (path);\n\n    file_id = seaf_fs_manager_path_to_obj_id (seaf->fs_mgr,\n                                              repo->store_id, repo->version,\n                                              commit->root_id, rpath, &mode, error);\n    if (file_id && S_ISDIR(mode)) {\n        g_free (file_id);\n        file_id = NULL;\n    }\n    g_free (rpath);\n\n    filter_error (error);\n\n    seaf_commit_unref(commit);\n    seaf_repo_unref (repo);\n\n    return file_id;\n}\n\n/* Virtual repo related */\n\nchar *\nseafile_create_virtual_repo (const char *origin_repo_id,\n                             const char *path,\n                             const char *repo_name,\n                             const char *repo_desc,\n                             const char *owner,\n                             const char *passwd,\n                             GError **error)\n{\n    if (!origin_repo_id || !path ||!repo_name || !repo_desc || !owner) {\n        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, \"Argument should not be null\");\n        return NULL;\n    }\n\n    if (!is_uuid_valid (origin_repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return NULL;\n    }\n\n    char *repo_id;\n    char *rpath = format_dir_path (path);\n\n    repo_id = seaf_repo_manager_create_virtual_repo (seaf->repo_mgr,\n                                                     origin_repo_id, rpath,\n                       
                              repo_name, repo_desc,\n                                                     owner, passwd, error);\n    g_free (rpath);\n\n    return repo_id;\n}\n\nGList *\nseafile_get_virtual_repos_by_owner (const char *owner, GError **error)\n{\n    GList *repos, *ret = NULL, *ptr;\n    SeafRepo *r, *o;\n    SeafileRepo *repo;\n    char *orig_repo_id;\n    gboolean is_original_owner;\n\n    if (!owner) {\n        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, \"Argument should not be null\");\n        return NULL;\n    }\n\n    repos = seaf_repo_manager_get_virtual_repos_by_owner (seaf->repo_mgr,\n                                                          owner,\n                                                          error);\n    for (ptr = repos; ptr != NULL; ptr = ptr->next) {\n        r = ptr->data;\n\n        orig_repo_id = r->virtual_info->origin_repo_id;\n        o = seaf_repo_manager_get_repo (seaf->repo_mgr, orig_repo_id);\n        if (!o) {\n            seaf_warning (\"Failed to get origin repo %.10s.\\n\", orig_repo_id);\n            seaf_repo_unref (r);\n            continue;\n        }\n\n        char *orig_owner = seaf_repo_manager_get_repo_owner (seaf->repo_mgr,\n                                                             orig_repo_id);\n        if (g_strcmp0 (orig_owner, owner) == 0)\n            is_original_owner = TRUE;\n        else\n            is_original_owner = FALSE;\n        g_free (orig_owner);\n\n        char *perm = seaf_repo_manager_check_permission (seaf->repo_mgr,\n                                                         r->id, owner, NULL);\n\n        repo = (SeafileRepo *)convert_repo (r);\n        if (repo) {\n            g_object_set (repo, \"is_original_owner\", is_original_owner,\n                          \"origin_repo_name\", o->name,\n                          \"virtual_perm\", perm, NULL);\n            ret = g_list_prepend (ret, repo);\n        }\n\n        seaf_repo_unref (r);\n        seaf_repo_unref (o);\n   
     g_free (perm);\n    }\n    g_list_free (repos);\n\n    return g_list_reverse (ret);\n}\n\nGObject *\nseafile_get_virtual_repo (const char *origin_repo,\n                          const char *path,\n                          const char *owner,\n                          GError **error)\n{\n    char *repo_id;\n    GObject *repo_obj;\n\n    char *rpath = format_dir_path (path);\n\n    repo_id = seaf_repo_manager_get_virtual_repo_id (seaf->repo_mgr,\n                                                     origin_repo,\n                                                     rpath,\n                                                     owner);\n    g_free (rpath);\n\n    if (!repo_id)\n        return NULL;\n\n    repo_obj = seafile_get_repo (repo_id, error);\n\n    g_free (repo_id);\n    return repo_obj;\n}\n\n/* System default library */\n\nchar *\nseafile_get_system_default_repo_id (GError **error)\n{\n    return get_system_default_repo_id(seaf);\n}\n\nstatic int\nupdate_valid_since_time (SeafRepo *repo, gint64 new_time)\n{\n    int ret = 0;\n    gint64 old_time = seaf_repo_manager_get_repo_valid_since (repo->manager,\n                                                              repo->id);\n\n    if (new_time > 0) {\n        if (new_time > old_time)\n            ret = seaf_repo_manager_set_repo_valid_since (repo->manager,\n                                                          repo->id,\n                                                          new_time);\n    } else if (new_time == 0) {\n        /* Only the head commit is valid after GC if no history is kept. 
*/\n        SeafCommit *head = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                                                           repo->id, repo->version,\n                                                           repo->head->commit_id);\n        if (head && (old_time < 0 || head->ctime > (guint64)old_time))\n            ret = seaf_repo_manager_set_repo_valid_since (repo->manager,\n                                                          repo->id,\n                                                          head->ctime);\n        seaf_commit_unref (head);\n    }\n\n    return ret;\n}\n\n/* Clean up a repo's history.\n * It just set valid-since time but not actually delete the data.\n */\nint\nseafile_clean_up_repo_history (const char *repo_id, int keep_days, GError **error)\n{\n    SeafRepo *repo;\n    int ret;\n\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid arguments\");\n        return -1;\n    }\n\n    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);\n    if (!repo) {\n        seaf_warning (\"Cannot find repo %s.\\n\", repo_id);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid arguments\");\n        return -1;\n    }\n\n    gint64 truncate_time, now;\n    if (keep_days > 0) {\n        now = (gint64)time(NULL);\n        truncate_time = now - keep_days * 24 * 3600;\n    } else\n        truncate_time = 0;\n\n    ret = update_valid_since_time (repo, truncate_time);\n    if (ret < 0) {\n        seaf_warning (\"Failed to update valid since time for repo %.8s.\\n\", repo->id);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, \"Database error\");\n    }\n\n    seaf_repo_unref (repo);\n    return ret;\n}\n\nGList *\nseafile_get_shared_users_for_subdir (const char *repo_id,\n                                     const char *path,\n                                     const char *from_user,\n                                     GError **error)\n{\n  
  if (!repo_id || !path || !from_user) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Argument should not be null\");\n        return NULL;\n    }\n\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo_id\");\n        return NULL;\n    }\n\n    char *rpath = format_dir_path (path);\n\n    GList *ret = seaf_repo_manager_get_shared_users_for_subdir (seaf->repo_mgr,\n                                                                repo_id, rpath,\n                                                                from_user, error);\n    g_free (rpath);\n\n    return ret;\n}\n\nGList *\nseafile_get_shared_groups_for_subdir (const char *repo_id,\n                                      const char *path,\n                                      const char *from_user,\n                                      GError **error)\n{\n    if (!repo_id || !path || !from_user) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Argument should not be null\");\n        return NULL;\n    }\n\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo_id\");\n        return NULL;\n    }\n\n    char *rpath = format_dir_path (path);\n\n    GList *ret = seaf_repo_manager_get_shared_groups_for_subdir (seaf->repo_mgr,\n                                                                 repo_id, rpath,\n                                                                 from_user, error);\n    g_free (rpath);\n\n    return ret;\n}\n\ngint64\nseafile_get_total_file_number (GError **error)\n{\n    return seaf_get_total_file_number (error);\n}\n\ngint64\nseafile_get_total_storage (GError **error)\n{\n    return seaf_get_total_storage (error);\n}\n\nGObject *\nseafile_get_file_count_info_by_path (const char *repo_id,\n                                     const char *path,\n                                     GError **error)\n{\n    if (!repo_id || !path) 
{\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Argument should not be null\");\n        return NULL;\n    }\n\n    GObject *ret = NULL;\n    SeafRepo *repo = NULL;\n    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);\n    if (!repo) {\n        seaf_warning (\"Failed to get repo %.10s\\n\", repo_id);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Library not exists\");\n        return NULL;\n    }\n\n    ret = seaf_fs_manager_get_file_count_info_by_path (seaf->fs_mgr,\n                                                       repo->store_id,\n                                                       repo->version,\n                                                       repo->root_id,\n                                                       path, error);\n    seaf_repo_unref (repo);\n\n    return ret;\n}\n\nchar *\nseafile_get_trash_repo_owner (const char *repo_id, GError **error)\n{\n    if (!repo_id) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Argument should not be null\");\n        return NULL;\n    }\n\n    return seaf_get_trash_repo_owner (repo_id);\n}\n\nint\nseafile_mkdir_with_parents (const char *repo_id, const char *parent_dir,\n                            const char *new_dir_path, const char *user,\n                            GError **error)\n{\n    if (!repo_id || !parent_dir || !new_dir_path || !user) {\n        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, \"Argument should not be null\");\n        return -1;\n    }\n\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return -1;\n    }\n\n    if (seaf_repo_manager_mkdir_with_parents (seaf->repo_mgr, repo_id,\n                                              parent_dir, new_dir_path,\n                                              user, error) < 0) {\n        return -1;\n    }\n\n    return 0;\n}\n\nint\nseafile_set_server_config_int (const 
char *group, const char *key, int value,\n                               GError **error)\n{\n    if (!group || !key) {\n        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, \"Argument should not be null\");\n        return -1;\n    }\n\n    return seaf_cfg_manager_set_config_int (seaf->cfg_mgr, group, key, value);\n}\n\nint\nseafile_get_server_config_int (const char *group, const char *key, GError **error)\n{\n    if (!group || !key ) {\n        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, \"Argument should not be null\");\n        return -1;\n    }\n\n    return seaf_cfg_manager_get_config_int (seaf->cfg_mgr, group, key);\n}\n\nint\nseafile_set_server_config_int64 (const char *group, const char *key, gint64 value,\n                                 GError **error)\n{\n    if (!group || !key) {\n        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, \"Argument should not be null\");\n        return -1;\n    }\n\n    return seaf_cfg_manager_set_config_int64 (seaf->cfg_mgr, group, key, value);\n}\n\ngint64\nseafile_get_server_config_int64 (const char *group, const char *key, GError **error)\n{\n    if (!group || !key ) {\n        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, \"Argument should not be null\");\n        return -1;\n    }\n\n    return seaf_cfg_manager_get_config_int64 (seaf->cfg_mgr, group, key);\n}\n\nint\nseafile_set_server_config_string (const char *group, const char *key, const char *value,\n                                  GError **error)\n{\n    if (!group || !key || !value) {\n        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, \"Argument should not be null\");\n        return -1;\n    }\n\n    return seaf_cfg_manager_set_config_string (seaf->cfg_mgr, group, key, value);\n}\n\nchar *\nseafile_get_server_config_string (const char *group, const char *key, GError **error)\n{\n    if (!group || !key ) {\n        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, \"Argument should not be null\");\n        return NULL;\n    }\n\n    return seaf_cfg_manager_get_config_string 
(seaf->cfg_mgr, group, key);\n}\n\nint\nseafile_set_server_config_boolean (const char *group, const char *key, int value,\n                                   GError **error)\n{\n    if (!group || !key) {\n        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, \"Argument should not be null\");\n        return -1;\n    }\n\n    return seaf_cfg_manager_set_config_boolean (seaf->cfg_mgr, group, key, value);\n}\n\nint\nseafile_get_server_config_boolean (const char *group, const char *key, GError **error)\n{\n    if (!group || !key ) {\n        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, \"Argument should not be null\");\n        return -1;\n    }\n\n    return seaf_cfg_manager_get_config_boolean (seaf->cfg_mgr, group, key);\n}\n\nGObject *\nseafile_get_group_shared_repo_by_path (const char *repo_id,\n                                       const char *path,\n                                       int group_id,\n                                       int is_org,\n                                       GError **error)\n{\n    if (!repo_id || group_id < 0) {\n        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, \"Arguments error\");\n        return NULL;\n    }\n    SeafRepoManager *mgr = seaf->repo_mgr;\n\n    return seaf_get_group_shared_repo_by_path (mgr, repo_id, path, group_id, is_org ? TRUE:FALSE, error);\n}\n\nGObject *\nseafile_get_shared_repo_by_path (const char *repo_id,\n                                 const char *path,\n                                 const char *shared_to,\n                                 int is_org,\n                                 GError **error)\n{\n    if (!repo_id || !shared_to) {\n        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, \"Arguments error\");\n        return NULL;\n    }\n    SeafRepoManager *mgr = seaf->repo_mgr;\n\n    return seaf_get_shared_repo_by_path (mgr, repo_id, path, shared_to, is_org ? 
TRUE:FALSE, error);\n}\n\nGList *\nseafile_get_group_repos_by_user (const char *user, GError **error)\n{\n    if (!user) {\n        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, \"Arguments error\");\n        return NULL;\n    }\n    SeafRepoManager *mgr = seaf->repo_mgr;\n\n    return seaf_get_group_repos_by_user (mgr, user, -1, error);\n}\n\nGList *\nseafile_get_org_group_repos_by_user (const char *user, int org_id, GError **error)\n{\n    if (!user) {\n        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, \"Arguments error\");\n        return NULL;\n    }\n    SeafRepoManager *mgr = seaf->repo_mgr;\n\n    return seaf_get_group_repos_by_user (mgr, user, org_id, error);\n}\n\nint\nseafile_repo_has_been_shared (const char *repo_id, int including_groups, GError **error)\n{\n    if (!repo_id) {\n        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, \"Arguments error\");\n        return FALSE;\n    }\n\n    gboolean exists = seaf_share_manager_repo_has_been_shared (seaf->share_mgr, repo_id,\n                                                               including_groups ? TRUE : FALSE);\n    return exists ? 1 : 0;\n}\n\nGList *\nseafile_get_shared_users_by_repo (const char *repo_id, GError **error)\n{\n    if (!repo_id) {\n        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, \"Arguments error\");\n        return NULL;\n    }\n\n    return seaf_share_manager_get_shared_users_by_repo (seaf->share_mgr,\n                                                        repo_id);\n}\n\nGList *\nseafile_org_get_shared_users_by_repo (int org_id,\n                                      const char *repo_id,\n                                      GError **error)\n{\n    if (!repo_id || org_id < 0) {\n        g_set_error (error, 0, SEAF_ERR_BAD_ARGS, \"Arguments error\");\n        return NULL;\n    }\n\n    return seaf_share_manager_org_get_shared_users_by_repo (seaf->share_mgr,\n                                                            org_id, repo_id);\n}\n\n/* Resumable file upload. 
*/\n\ngint64\nseafile_get_upload_tmp_file_offset (const char *repo_id, const char *file_path,\n                                    GError **error)\n{\n    if (!repo_id || !is_uuid_valid(repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid repo id\");\n        return -1;\n    }\n\n    int path_len;\n    if (!file_path || (path_len = strlen(file_path)) == 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid file path\");\n        return -1;\n    }\n\n    char *rfile_path = format_dir_path (file_path);\n    gint64 ret = seaf_repo_manager_get_upload_tmp_file_offset (seaf->repo_mgr, repo_id,\n                                                               rfile_path, error);\n    g_free (rfile_path);\n\n    return ret;\n}\n\nchar *\nseafile_convert_repo_path (const char *repo_id,\n                           const char *path,\n                           const char *user,\n                           int is_org,\n                           GError **error)\n{\n    if (!is_uuid_valid(repo_id) || !path || !user) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Arguments error\");\n        return NULL;\n    }\n\n    char *rpath = format_dir_path (path);\n    char *ret = seaf_repo_manager_convert_repo_path(seaf->repo_mgr, repo_id, rpath, user, is_org ? 
TRUE : FALSE, error);\n    g_free(rpath);\n\n    return ret;\n}\n\nint\nseafile_set_repo_status(const char *repo_id, int status, GError **error)\n{\n    if (!is_uuid_valid(repo_id) ||\n        status < 0 || status >= N_REPO_STATUS) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Arguments error\");\n        return -1;\n    }\n\n    return seaf_repo_manager_set_repo_status(seaf->repo_mgr, repo_id, status);\n}\n\nint\nseafile_get_repo_status(const char *repo_id, GError **error)\n{\n    int status;\n\n    if (!is_uuid_valid(repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Arguments error\");\n        return -1;\n    }\n\n    status = seaf_repo_manager_get_repo_status(seaf->repo_mgr, repo_id);\n\n    return (status == -1) ? 0 : status;\n}\n\nGList *\nseafile_search_files (const char *repo_id, const char *str, GError **error)\n{\n    return seafile_search_files_by_path (repo_id, NULL, str, error);\n}\n\nGList *\nseafile_search_files_by_path (const char *repo_id, const char *path, const char *str, GError **error)\n{\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return NULL;\n    }\n\n    GList *file_list = seaf_fs_manager_search_files_by_path (seaf->fs_mgr, repo_id, path, str);\n    GList *ret = NULL, *ptr;\n\n    for (ptr = file_list; ptr; ptr=ptr->next) {\n        SearchResult *sr = ptr->data;\n        SeafileSearchResult *search_result = seafile_search_result_new ();\n        g_object_set (search_result, \"path\", sr->path, \"size\", sr->size,\n                      \"mtime\", sr->mtime, \"is_dir\", sr->is_dir, NULL);\n\n        ret = g_list_prepend (ret, search_result);\n        g_free (sr->path);\n        g_free (sr);\n    }\n\n    return g_list_reverse (ret);\n}\n\n/*RPC functions merged from ccnet-server*/\nint\nccnet_rpc_add_emailuser (const char *email, const char *passwd,\n                         int is_staff, int is_active, 
GError **error)\n{\n    CcnetUserManager *user_mgr = seaf->user_mgr; \n    int ret;\n    \n    if (!email || !passwd) {\n        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, \"Email and passwd can not be NULL\");\n        return -1;\n    }\n\n    ret = ccnet_user_manager_add_emailuser (user_mgr, email, passwd,\n                                            is_staff, is_active);\n    \n    return ret;\n}\n\nint\nccnet_rpc_remove_emailuser (const char *source, const char *email, GError **error)\n{\n    CcnetUserManager *user_mgr = seaf->user_mgr; \n    int ret;\n\n    if (!email) {\n        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, \"Email can not be NULL\");\n        return -1;\n    }\n\n    ret = ccnet_user_manager_remove_emailuser (user_mgr, source, email);\n\n    return ret;\n}\n\nint\nccnet_rpc_validate_emailuser (const char *email, const char *passwd, GError **error)\n{\n   CcnetUserManager *user_mgr = seaf->user_mgr; \n    int ret;\n    \n    if (!email || !passwd) {\n        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, \"Email and passwd can not be NULL\");\n        return -1;\n    }\n\n    if (passwd[0] == 0)\n        return -1;\n\n    ret = ccnet_user_manager_validate_emailuser (user_mgr, email, passwd);\n\n    return ret;\n}\n\nGObject*\nccnet_rpc_get_emailuser (const char *email, GError **error)\n{\n    if (!email) {\n        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, \"Email can not be NULL\");\n        return NULL;\n    }\n\n    CcnetUserManager *user_mgr = seaf->user_mgr;\n    CcnetEmailUser *emailuser = NULL;\n    \n    emailuser = ccnet_user_manager_get_emailuser (user_mgr, email, error);\n    \n    return (GObject *)emailuser;\n}\n\nGObject*\nccnet_rpc_get_emailuser_with_import (const char *email, GError **error)\n{\n    if (!email) {\n        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, \"Email can not be NULL\");\n        return NULL;\n    }\n\n    CcnetUserManager *user_mgr = seaf->user_mgr;\n    
CcnetEmailUser *emailuser = NULL;\n\n    emailuser = ccnet_user_manager_get_emailuser_with_import (user_mgr, email, error);\n\n    return (GObject *)emailuser;\n}\n\nGObject*\nccnet_rpc_get_emailuser_by_id (int id, GError **error)\n{\n   CcnetUserManager *user_mgr = seaf->user_mgr; \n    CcnetEmailUser *emailuser = NULL;\n    \n    emailuser = ccnet_user_manager_get_emailuser_by_id (user_mgr, id);\n    \n    return (GObject *)emailuser;\n}\n\nGList*\nccnet_rpc_get_emailusers (const char *source,\n                          int start, int limit,\n                          const char *status,\n                          GError **error)\n{\n   CcnetUserManager *user_mgr = seaf->user_mgr; \n    GList *emailusers = NULL;\n\n    emailusers = ccnet_user_manager_get_emailusers (user_mgr, source, start, limit, status);\n    \n    return emailusers;\n}\n\nGList*\nccnet_rpc_search_emailusers (const char *source,\n                             const char *email_patt,\n                             int start, int limit,\n                             GError **error)\n{\n    CcnetUserManager *user_mgr = seaf->user_mgr; \n    GList *emailusers = NULL;\n\n    emailusers = ccnet_user_manager_search_emailusers (user_mgr,\n                                                       source,\n                                                       email_patt,\n                                                       start, limit);\n    \n    return emailusers;\n}\n\nGList*\nccnet_rpc_search_groups (const char *group_patt,\n                         int start, int limit,\n                         GError **error)\n{\n    CcnetGroupManager *group_mgr = seaf->group_mgr;\n    GList *groups = NULL;\n\n    groups = ccnet_group_manager_search_groups (group_mgr,\n                                                group_patt,\n                                                start, limit);\n    return groups;\n}\n\nGList *\nccnet_rpc_search_group_members (int group_id, const char *pattern, GError **error)\n{\n    
CcnetGroupManager *group_mgr = seaf->group_mgr;\n    GList *ret = NULL;\n\n    ret = ccnet_group_manager_search_group_members (group_mgr, group_id, pattern);\n\n    return ret;\n}\n\nGList*\nccnet_rpc_get_top_groups (int including_org, GError **error)\n{\n    CcnetGroupManager *group_mgr = seaf->group_mgr;\n    GList *groups = NULL;\n\n    groups = ccnet_group_manager_get_top_groups (group_mgr, including_org ? TRUE : FALSE, error);\n\n    return groups;\n}\n\nGList*\nccnet_rpc_get_child_groups (int group_id, GError **error)\n{\n    CcnetGroupManager *group_mgr = seaf->group_mgr;\n    GList *groups = NULL;\n\n    groups = ccnet_group_manager_get_child_groups (group_mgr, group_id, error);\n\n    return groups;\n}\n\nGList*\nccnet_rpc_get_descendants_groups(int group_id, GError **error)\n{\n    CcnetGroupManager *group_mgr = seaf->group_mgr;\n    GList *groups = NULL;\n\n    groups = ccnet_group_manager_get_descendants_groups (group_mgr, group_id, error);\n\n    return groups;\n}\n\ngint64\nccnet_rpc_count_emailusers (const char *source, GError **error)\n{\n   CcnetUserManager *user_mgr = seaf->user_mgr; \n\n   return ccnet_user_manager_count_emailusers (user_mgr, source);\n}\n\ngint64\nccnet_rpc_count_inactive_emailusers (const char *source, GError **error)\n{\n   CcnetUserManager *user_mgr = seaf->user_mgr;\n\n   return ccnet_user_manager_count_inactive_emailusers (user_mgr, source);\n}\n\nint\nccnet_rpc_update_emailuser (const char *source, int id, const char* passwd,\n                            int is_staff, int is_active,\n                            GError **error)\n{\n    CcnetUserManager *user_mgr = seaf->user_mgr;\n\n    return ccnet_user_manager_update_emailuser(user_mgr, source, id, passwd,\n                                               is_staff, is_active);\n}\n\nint\nccnet_rpc_update_role_emailuser (const char* email, const char* role,\n                            GError **error)\n{\n    CcnetUserManager *user_mgr = seaf->user_mgr;\n\n    return 
ccnet_user_manager_update_role_emailuser(user_mgr, email, role);\n}\n\nGList*\nccnet_rpc_get_superusers (GError **error)\n{\n    CcnetUserManager *user_mgr = seaf->user_mgr; \n\n    return ccnet_user_manager_get_superusers(user_mgr);\n}\n\nGList *\nccnet_rpc_get_emailusers_in_list(const char *source, const char *user_list, GError **error)\n{\n    if (!user_list || !source) {\n        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, \"Bad arguments\");\n        return NULL;\n    }\n    CcnetUserManager *user_mgr = seaf->user_mgr;\n\n    return ccnet_user_manager_get_emailusers_in_list (user_mgr, source, user_list, error);\n}\n\nint\nccnet_rpc_update_emailuser_id (const char *old_email, const char *new_email, GError **error)\n{\n    if (!old_email || !new_email) {\n        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, \"Bad arguments\");\n        return -1;\n    }\n    CcnetUserManager *user_mgr = seaf->user_mgr;\n\n    return ccnet_user_manager_update_emailuser_id (user_mgr, old_email, new_email, error);\n}\n\nint\nccnet_rpc_create_group (const char *group_name, const char *user_name,\n                        const char *type, int parent_group_id, GError **error)\n{\n    CcnetGroupManager *group_mgr = seaf->group_mgr;\n    int ret;\n\n    if (!group_name || !user_name) {\n        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,\n                     \"Group name and user name can not be NULL\");\n        return -1;\n    }\n\n    ret = ccnet_group_manager_create_group (group_mgr, group_name, user_name, parent_group_id, error);\n\n    return ret;\n}\n\nint\nccnet_rpc_create_org_group (int org_id, const char *group_name,\n                            const char *user_name, int parent_group_id, GError **error)\n{\n    CcnetGroupManager *group_mgr = seaf->group_mgr;\n    int ret;\n\n    if (org_id < 0 || !group_name || !user_name) {\n        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, \"Bad args\");\n        return -1;\n    }\n\n    ret = 
ccnet_group_manager_create_org_group (group_mgr, org_id,\n                                                group_name, user_name, parent_group_id, error);\n\n    return ret;\n}\n\nint\nccnet_rpc_remove_group (int group_id, GError **error)\n{\n    CcnetGroupManager *group_mgr = seaf->group_mgr;\n    int ret;\n\n    if (group_id <= 0) {\n        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,\n                     \"Invalid group_id parameter\");\n        return -1;\n    }\n\n    ret = ccnet_group_manager_remove_group (group_mgr, group_id, FALSE, error);\n\n    return ret;\n\n}\n\nint\nccnet_rpc_group_add_member (int group_id, const char *user_name,\n                            const char *member_name, GError **error)\n{\n    CcnetGroupManager *group_mgr = seaf->group_mgr;\n    int ret;\n\n    if (group_id <= 0 || !user_name || !member_name) {\n        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,\n                     \"Group id and user name and member name can not be NULL\");\n        return -1;\n    }\n\n    ret = ccnet_group_manager_add_member (group_mgr, group_id, user_name, member_name,\n                                          error);\n\n    return ret;\n}\n\nint\nccnet_rpc_group_remove_member (int group_id, const char *user_name,\n                               const char *member_name, GError **error)\n{\n    CcnetGroupManager *group_mgr = seaf->group_mgr;\n    int ret;\n\n    if (!user_name || !member_name) {\n        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,\n                     \"User name and member name can not be NULL\");\n        return -1;\n    }\n\n    ret = ccnet_group_manager_remove_member (group_mgr, group_id, user_name,\n                                             member_name, error);\n\n    return ret;\n}\n\nint\nccnet_rpc_group_set_admin (int group_id, const char *member_name,\n                           GError **error)\n{\n    CcnetGroupManager *group_mgr = seaf->group_mgr;\n    int ret;\n\n    if (group_id <= 0 || 
!member_name) {\n        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,\n                     \"Bad arguments\");\n        return -1;\n    }\n\n    ret = ccnet_group_manager_set_admin (group_mgr, group_id, member_name,\n                                         error);\n    return ret;\n}\n\nint\nccnet_rpc_group_unset_admin (int group_id, const char *member_name,\n                           GError **error)\n{\n    CcnetGroupManager *group_mgr = seaf->group_mgr;\n    int ret;\n\n    if (group_id <= 0 || !member_name) {\n        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,\n                     \"Bad arguments\");\n        return -1;\n    }\n\n    ret = ccnet_group_manager_unset_admin (group_mgr, group_id, member_name,\n                                           error);\n    return ret;\n}\n\nint\nccnet_rpc_set_group_name (int group_id, const char *group_name,\n                          GError **error)\n{\n    CcnetGroupManager *group_mgr = seaf->group_mgr;\n    int ret;\n\n    if (group_id <= 0 || !group_name) {\n        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,\n                     \"Bad arguments\");\n        return -1;\n    }\n\n    ret = ccnet_group_manager_set_group_name (group_mgr, group_id, group_name,\n                                              error);\n    return ret;\n}\n\nint\nccnet_rpc_quit_group (int group_id, const char *user_name, GError **error)\n{\n    CcnetGroupManager *group_mgr = seaf->group_mgr;\n    int ret;\n\n    if (group_id <= 0 || !user_name) {\n        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,\n                     \"Group id and user name can not be NULL\");\n        return -1;\n    }\n\n    ret = ccnet_group_manager_quit_group (group_mgr, group_id, user_name, error);\n\n    return ret;\n}\n\nGList *\nccnet_rpc_get_groups (const char *username, int return_ancestors, GError **error)\n{\n    CcnetGroupManager *group_mgr = seaf->group_mgr;\n    GList *ret = NULL;\n\n    if (!username) {\n        
g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,\n                     \"User name can not be NULL\");\n        return NULL;\n    }\n\n    ret = ccnet_group_manager_get_groups_by_user (group_mgr, username,\n                                                  return_ancestors ? TRUE : FALSE, error);\n    return ret;\n}\n\nGList *\nccnet_rpc_list_all_departments (GError **error)\n{\n    CcnetGroupManager *group_mgr = seaf->group_mgr;\n    GList *ret = NULL;\n\n    ret = ccnet_group_manager_list_all_departments (group_mgr, error);\n\n    return ret;\n}\n\nGList*\nseafile_get_repos_by_id_prefix  (const char *id_prefix, int start,\n                                 int limit, GError **error)\n{\n    GList *ret = NULL;\n    GList *repos, *ptr;\n\n    if (!id_prefix) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Argument should not be null\");\n        return NULL;\n    }\n\n    repos = seaf_repo_manager_get_repos_by_id_prefix (seaf->repo_mgr, id_prefix,\n                                                      start, limit);\n\n    ret = convert_repo_list (repos);\n\n    for(ptr = repos; ptr; ptr = ptr->next) {\n        seaf_repo_unref ((SeafRepo *)ptr->data);\n    }\n    g_list_free (repos);\n\n    return ret;\n}\n\nGList *\nccnet_rpc_get_all_groups (int start, int limit,\n                          const char *source, GError **error)\n{\n    CcnetGroupManager *group_mgr = seaf->group_mgr;\n    GList *ret = NULL;\n\n    ret = ccnet_group_manager_get_all_groups (group_mgr, start, limit, error);\n\n    return ret;\n}\n\nGList *\nccnet_rpc_get_ancestor_groups (int group_id, GError ** error)\n{\n    CcnetGroupManager *group_mgr = seaf->group_mgr;\n    GList *ret = NULL;\n\n    ret = ccnet_group_manager_get_ancestor_groups (group_mgr, group_id);\n\n    return ret;\n}\n\nGObject *\nccnet_rpc_get_group (int group_id, GError **error)\n{\n    CcnetGroupManager *group_mgr = seaf->group_mgr;\n    CcnetGroup *group = NULL;\n\n    group = 
ccnet_group_manager_get_group (group_mgr, group_id, error);\n    if (!group) {\n        return NULL;\n    }\n\n    /* g_object_ref (group); */\n    return (GObject *)group;\n}\n\n\nGList *\nccnet_rpc_get_group_members (int group_id, int start, int limit, GError **error)\n{\n    CcnetGroupManager *group_mgr = seaf->group_mgr;\n    GList *ret = NULL;\n\n    if (start < 0 ) {\n        start = 0;\n    }\n\n    ret = ccnet_group_manager_get_group_members (group_mgr, group_id, start, limit, error);\n    if (ret == NULL)\n        return NULL;\n\n    return g_list_reverse (ret);\n}\n\nGList *\nccnet_rpc_get_members_with_prefix(int group_id, const char *prefix, GError **error)\n{\n    CcnetGroupManager *group_mgr = seaf->group_mgr;\n    GList *ret = NULL;\n\n    ret = ccnet_group_manager_get_members_with_prefix (group_mgr, group_id, prefix, error);\n\n    return ret;\n}\n\nint\nccnet_rpc_check_group_staff (int group_id, const char *user_name, int in_structure,\n                             GError **error)\n{\n    CcnetGroupManager *group_mgr = seaf->group_mgr;\n\n    if (group_id <= 0 || !user_name) {\n        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL,\n                     \"Bad arguments\");\n        return -1;\n    }\n\n    return ccnet_group_manager_check_group_staff (group_mgr,\n                                                  group_id, user_name,\n                                                  in_structure ? 
TRUE : FALSE);\n}\n\nint\nccnet_rpc_remove_group_user (const char *user, GError **error)\n{\n    CcnetGroupManager *group_mgr = seaf->group_mgr;\n    if (!user) {\n        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, \"Bad arguments\");\n        return -1;\n    }\n\n    return ccnet_group_manager_remove_group_user (group_mgr, user);\n}\n\nint\nccnet_rpc_is_group_user (int group_id, const char *user, int in_structure, GError **error)\n{\n    CcnetGroupManager *group_mgr = seaf->group_mgr;\n    if (!user || group_id < 0) {\n        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, \"Bad arguments\");\n        return 0;\n    }\n\n    return ccnet_group_manager_is_group_user (group_mgr, group_id, user, in_structure ? TRUE : FALSE);\n}\n\nint\nccnet_rpc_set_group_creator (int group_id, const char *user_name,\n                             GError **error)\n{\n    CcnetGroupManager *group_mgr = seaf->group_mgr;\n    if (!user_name || group_id < 0) {\n        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, \"Bad arguments\");\n        return -1;\n    }\n\n    return ccnet_group_manager_set_group_creator (group_mgr, group_id,\n                                                  user_name);\n}\n\nGList *\nccnet_rpc_get_groups_members (const char *group_ids, GError **error)\n{\n    if (!group_ids || g_strcmp0(group_ids, \"\") == 0) {\n        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, \"Bad arguments\");\n        return NULL;\n    }\n    CcnetGroupManager *group_mgr = seaf->group_mgr;\n\n    return ccnet_group_manager_get_groups_members (group_mgr, group_ids, error);\n}\n\nint\nccnet_rpc_create_org (const char *org_name, const char *url_prefix,\n                      const char *creator, GError **error)\n{\n    CcnetOrgManager *org_mgr = seaf->org_mgr;\n\n    if (!org_name || !url_prefix || !creator) {\n        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, \"Bad arguments\");\n        return -1;\n    }\n\n    return 
ccnet_org_manager_create_org (org_mgr, org_name, url_prefix, creator,\n                                         error);\n}\n\nint\nccnet_rpc_remove_org (int org_id, GError **error)\n{\n    GList *group_ids = NULL, *email_list=NULL, *ptr;\n    const char *url_prefix = NULL;\n    CcnetOrgManager *org_mgr = seaf->org_mgr;\n    CcnetUserManager *user_mgr = seaf->user_mgr;\n    CcnetGroupManager *group_mgr = seaf->group_mgr;\n\n    if (org_id < 0) {\n        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, \"Bad arguments\");\n        return -1;\n    }\n\n    url_prefix = ccnet_org_manager_get_url_prefix_by_org_id (org_mgr, org_id,\n                                                             error);\n    email_list = ccnet_org_manager_get_org_emailusers (org_mgr, url_prefix,\n                                                       0, INT_MAX);\n    ptr = email_list;\n    while (ptr) {\n        ccnet_user_manager_remove_emailuser (user_mgr, \"DB\", (gchar *)ptr->data);\n        ptr = ptr->next;\n    }\n    string_list_free (email_list);\n\n    group_ids = ccnet_org_manager_get_org_group_ids (org_mgr, org_id, 0, INT_MAX);\n    ptr = group_ids;\n    while (ptr) {\n        ccnet_group_manager_remove_group (group_mgr, (int)(long)ptr->data, TRUE, error);\n        ptr = ptr->next;\n    }\n    g_list_free (group_ids);\n\n    return ccnet_org_manager_remove_org (org_mgr, org_id, error);\n}\n\nGList *\nccnet_rpc_get_all_orgs (int start, int limit, GError **error)\n{\n    CcnetOrgManager *org_mgr = seaf->org_mgr;\n    GList *ret = NULL;\n\n    ret = ccnet_org_manager_get_all_orgs (org_mgr, start, limit);\n\n    return ret;\n}\n\ngint64\nccnet_rpc_count_orgs (GError **error)\n{\n    CcnetOrgManager *org_mgr = seaf->org_mgr;\n\n    return ccnet_org_manager_count_orgs(org_mgr);\n}\n\n\nGObject *\nccnet_rpc_get_org_by_url_prefix (const char *url_prefix, GError **error)\n{\n    CcnetOrganization *org = NULL;\n    CcnetOrgManager *org_mgr = seaf->org_mgr;\n\n    if (!url_prefix) 
{\n        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, \"Bad arguments\");\n        return NULL;\n    }\n\n    org = ccnet_org_manager_get_org_by_url_prefix (org_mgr, url_prefix, error);\n    if (!org)\n        return NULL;\n\n    return (GObject *)org;\n}\n\nGObject *\nccnet_rpc_get_org_by_id (int org_id, GError **error)\n{\n    CcnetOrganization *org = NULL;\n    CcnetOrgManager *org_mgr = seaf->org_mgr;\n\n    if (org_id <= 0) {\n        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, \"Bad arguments\");\n        return NULL;\n    }\n\n    org = ccnet_org_manager_get_org_by_id (org_mgr, org_id, error);\n    if (!org)\n        return NULL;\n\n    return (GObject *)org;\n}\n\nint\nccnet_rpc_add_org_user (int org_id, const char *email, int is_staff,\n                        GError **error)\n{\n    CcnetOrgManager *org_mgr = seaf->org_mgr;\n\n    if (org_id < 0 || !email) {\n        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, \"Bad arguments\");\n        return -1;\n    }\n\n    return ccnet_org_manager_add_org_user (org_mgr, org_id, email, is_staff,\n                                           error);\n}\n\nint\nccnet_rpc_remove_org_user (int org_id, const char *email, GError **error)\n{\n    CcnetOrgManager *org_mgr = seaf->org_mgr;\n\n    if (org_id < 0 || !email) {\n        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, \"Bad arguments\");\n        return -1;\n    }\n\n    return ccnet_org_manager_remove_org_user (org_mgr, org_id, email, error);\n}\n\nGList *\nccnet_rpc_get_orgs_by_user (const char *email, GError **error)\n{\n    CcnetOrgManager *org_mgr = seaf->org_mgr;\n    GList *org_list = NULL;\n\n    org_list = ccnet_org_manager_get_orgs_by_user (org_mgr, email, error);\n\n    return org_list;\n}\n\nGList *\nccnet_rpc_get_org_emailusers (const char *url_prefix, int start , int limit,\n                              GError **error)\n{\n    CcnetUserManager *user_mgr = seaf->user_mgr;\n    CcnetOrgManager *org_mgr = 
seaf->org_mgr;\n    GList *email_list = NULL, *ptr;\n    GList *ret = NULL;\n\n    if (!url_prefix) {\n        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, \"Bad arguments\");\n        return NULL;\n    }\n\n    email_list = ccnet_org_manager_get_org_emailusers (org_mgr, url_prefix,\n                                                       start, limit);\n    if (email_list == NULL) {\n        return NULL;\n    }\n\n    ptr = email_list;\n    while (ptr) {\n        char *email = ptr->data;\n        CcnetEmailUser *emailuser = ccnet_user_manager_get_emailuser (user_mgr,\n                                                                      email, NULL);\n        if (emailuser != NULL) {\n            ret = g_list_prepend (ret, emailuser);\n        }\n\n        ptr = ptr->next;\n    }\n\n    string_list_free (email_list);\n\n    return g_list_reverse (ret);\n}\n\nint\nccnet_rpc_add_org_group (int org_id, int group_id, GError **error)\n{\n    CcnetOrgManager *org_mgr = seaf->org_mgr;\n\n    if (org_id < 0 || group_id < 0) {\n        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, \"Bad arguments\");\n        return -1;\n    }\n\n    return ccnet_org_manager_add_org_group (org_mgr, org_id, group_id, error);\n}\n\nint\nccnet_rpc_remove_org_group (int org_id, int group_id, GError **error)\n{\n    CcnetOrgManager *org_mgr = seaf->org_mgr;\n\n    if (org_id < 0 || group_id < 0) {\n        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, \"Bad arguments\");\n        return -1;\n    }\n\n    return ccnet_org_manager_remove_org_group (org_mgr, org_id, group_id,\n                                               error);\n}\n\nint\nccnet_rpc_is_org_group (int group_id, GError **error)\n{\n    CcnetOrgManager *org_mgr = seaf->org_mgr;\n\n    if (group_id <= 0) {\n        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, \"Bad arguments\");\n        return -1;\n    }\n\n    return ccnet_org_manager_is_org_group (org_mgr, group_id, 
error);\n}\n\nint\nccnet_rpc_get_org_id_by_group (int group_id, GError **error)\n{\n    CcnetOrgManager *org_mgr = seaf->org_mgr;\n\n    if (group_id <= 0) {\n        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, \"Bad arguments\");\n        return -1;\n    }\n\n    return ccnet_org_manager_get_org_id_by_group (org_mgr, group_id, error);\n}\n\nGList *\nccnet_rpc_get_org_groups (int org_id, int start, int limit, GError **error)\n{\n    CcnetOrgManager *org_mgr = seaf->org_mgr;\n    GList *ret = NULL;\n\n    if (org_id < 0) {\n        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, \"Bad arguments\");\n        return NULL;\n    }\n\n    /* correct parameter */\n    if (start < 0 ) {\n        start = 0;\n    }\n\n    ret = ccnet_org_manager_get_org_groups (org_mgr, org_id, start, limit);\n\n    return ret;\n}\n\nGList *\nccnet_rpc_get_org_groups_by_user (const char *user, int org_id, GError **error)\n{\n    CcnetOrgManager *org_mgr = seaf->org_mgr;\n    GList *ret = NULL;\n\n    if (org_id < 0 || !user) {\n        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, \"Bad arguments\");\n        return NULL;\n    }\n    ret = ccnet_org_manager_get_org_groups_by_user (org_mgr, user, org_id);\n\n    return ret;\n}\n\nGList *\nccnet_rpc_get_org_top_groups (int org_id, GError **error)\n{\n    CcnetOrgManager *org_mgr = seaf->org_mgr;\n    GList *ret = NULL;\n\n    if (org_id < 0) {\n        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, \"Bad arguments\");\n        return NULL;\n    }\n    ret = ccnet_org_manager_get_org_top_groups (org_mgr, org_id, error);\n\n    return ret;\n}\n\nint\nccnet_rpc_org_user_exists (int org_id, const char *email, GError **error)\n{\n    CcnetOrgManager *org_mgr = seaf->org_mgr;\n\n    if (org_id < 0 || !email) {\n        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, \"Bad arguments\");\n        return -1;\n    }\n\n    return ccnet_org_manager_org_user_exists (org_mgr, org_id, email, 
error);\n}\n\nint\nccnet_rpc_is_org_staff (int org_id, const char *email, GError **error)\n{\n    CcnetOrgManager *org_mgr = seaf->org_mgr;\n\n    if (org_id < 0 || !email) {\n        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, \"Bad arguments\");\n        return -1;\n    }\n\n    return ccnet_org_manager_is_org_staff (org_mgr, org_id, email, error);\n}\n\nint\nccnet_rpc_set_org_staff (int org_id, const char *email, GError **error)\n{\n    CcnetOrgManager *org_mgr = seaf->org_mgr;\n\n    if (org_id < 0 || !email) {\n        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, \"Bad arguments\");\n        return -1;\n    }\n\n    return ccnet_org_manager_set_org_staff (org_mgr, org_id, email, error);\n}\n\nint\nccnet_rpc_unset_org_staff (int org_id, const char *email, GError **error)\n{\n    CcnetOrgManager *org_mgr = seaf->org_mgr;\n\n    if (org_id < 0 || !email) {\n        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, \"Bad arguments\");\n        return -1;\n    }\n\n    return ccnet_org_manager_unset_org_staff (org_mgr, org_id, email, error);\n}\n\nint\nccnet_rpc_set_org_name (int org_id, const char *org_name, GError **error)\n{\n    CcnetOrgManager *org_mgr = seaf->org_mgr;\n\n    if (org_id < 0 || !org_name) {\n        g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, \"Bad arguments\");\n        return -1;\n    }\n\n    return ccnet_org_manager_set_org_name (org_mgr, org_id, org_name, error);\n}\n\n#endif  /* SEAFILE_SERVER */\n"
  },
  {
    "path": "common/seaf-db.c",
    "content": "\n#include \"common.h\"\n\n#include \"log.h\"\n\n#include \"seaf-db.h\"\n\n#include <stdarg.h>\n#ifdef HAVE_MYSQL\n#include <mysql.h>\n#include <errmsg.h>\n#endif\n#include <sqlite3.h>\n#include <pthread.h>\n\nstruct DBConnPool {\n    GPtrArray *connections;\n    pthread_mutex_t lock;\n    int max_connections;\n};\ntypedef struct DBConnPool DBConnPool;\n\nstruct SeafDB {\n    int type;\n    DBConnPool *pool;\n};\n\ntypedef struct DBConnection {\n    gboolean is_available;\n    gboolean delete_pending;\n    DBConnPool *pool;\n} DBConnection;\n\nstruct SeafDBRow {\n    /* Empty */\n};\n\nstruct SeafDBTrans {\n    DBConnection *conn;\n    gboolean need_close;\n};\n\ntypedef struct DBOperations {\n    DBConnection* (*get_connection)(SeafDB *db);\n    void (*release_connection)(DBConnection *conn, gboolean need_close);\n    int (*execute_sql_no_stmt)(DBConnection *conn, const char *sql, gboolean *retry);\n    int (*execute_sql)(DBConnection *conn, const char *sql,\n                       int n, va_list args, gboolean *retry);\n    int (*query_foreach_row)(DBConnection *conn,\n                             const char *sql, SeafDBRowFunc callback, void *data,\n                             int n, va_list args, gboolean *retry);\n    int (*row_get_column_count)(SeafDBRow *row);\n    const char* (*row_get_column_string)(SeafDBRow *row, int idx);\n    int (*row_get_column_int)(SeafDBRow *row, int idx);\n    gint64 (*row_get_column_int64)(SeafDBRow *row, int idx);\n} DBOperations;\n\nstatic DBOperations db_ops;\n\n#ifdef HAVE_MYSQL\n\n/* MySQL Ops */\nstatic SeafDB *\nmysql_db_new (const char *host,\n              int port,\n              const char *user,\n              const char *password,\n              const char *db_name,\n              const char *unix_socket,\n              gboolean use_ssl,\n              gboolean skip_verify,\n              const char *ca_path,\n              const char *charset);\nstatic DBConnection *\nmysql_db_get_connection (SeafDB 
*db);\nstatic void\nmysql_db_release_connection (DBConnection *vconn);\nstatic int\nmysql_db_execute_sql_no_stmt (DBConnection *vconn, const char *sql, gboolean *retry);\nstatic int\nmysql_db_execute_sql (DBConnection *vconn, const char *sql, int n, va_list args, gboolean *retry);\nstatic int\nmysql_db_query_foreach_row (DBConnection *vconn, const char *sql,\n                            SeafDBRowFunc callback, void *data,\n                            int n, va_list args, gboolean *retry);\nstatic int\nmysql_db_row_get_column_count (SeafDBRow *row);\nstatic const char *\nmysql_db_row_get_column_string (SeafDBRow *row, int idx);\nstatic int\nmysql_db_row_get_column_int (SeafDBRow *row, int idx);\nstatic gint64\nmysql_db_row_get_column_int64 (SeafDBRow *row, int idx);\nstatic gboolean\nmysql_db_connection_ping (DBConnection *vconn);\n\nstatic DBConnPool *\ninit_conn_pool_common (int max_connections)\n{\n    DBConnPool *pool = g_new0(DBConnPool, 1);\n    pool->connections = g_ptr_array_sized_new (max_connections);\n    pthread_mutex_init (&pool->lock, NULL);\n    pool->max_connections = max_connections;\n\n    return pool;\n}\n\nstatic DBConnection *\nmysql_conn_pool_get_connection (SeafDB *db)\n{\n    DBConnPool *pool = db->pool;\n    DBConnection *conn = NULL;\n    DBConnection *d_conn = NULL;\n\n    if (pool->max_connections == 0) {\n        conn = mysql_db_get_connection (db);\n        conn->pool = pool;\n        return conn;\n    }\n\n    pthread_mutex_lock (&pool->lock);\n\n    guint i, size = pool->connections->len;\n    for (i = 0; i < size; ++i) {\n        conn = g_ptr_array_index (pool->connections, i);\n        if (!conn->is_available) {\n            continue;\n        }\n        if (mysql_db_connection_ping (conn)) {\n            conn->is_available = FALSE;\n            goto out;\n        }\n        conn->is_available = FALSE;\n        conn->delete_pending = TRUE;\n    }\n    conn = NULL;\n    if (size < pool->max_connections) {\n        conn = 
mysql_db_get_connection (db);\n        if (conn) {\n            conn->pool = pool;\n            conn->is_available = FALSE;\n            g_ptr_array_add (pool->connections, conn);\n        }\n    }\n\nout:\n    size = pool->connections->len;\n    if (size > 0) {\n        int index;\n        for (index = size - 1; index >= 0; index--) {\n            d_conn = g_ptr_array_index (pool->connections, index);\n            if (d_conn->delete_pending) {\n                g_ptr_array_remove (pool->connections, d_conn);\n                mysql_db_release_connection (d_conn);\n            }\n        }\n    }\n    pthread_mutex_unlock (&pool->lock);\n    return conn;\n}\n\nstatic void\nmysql_conn_pool_release_connection (DBConnection *conn, gboolean need_close)\n{\n    if (!conn)\n        return;\n\n    if (conn->pool->max_connections == 0) {\n        mysql_db_release_connection (conn);\n        return;\n    }\n\n    if (need_close) {\n        pthread_mutex_lock (&conn->pool->lock);\n        g_ptr_array_remove (conn->pool->connections, conn);\n        pthread_mutex_unlock (&conn->pool->lock);\n        mysql_db_release_connection (conn);\n        return;\n    }\n\n    pthread_mutex_lock (&conn->pool->lock);\n    conn->is_available = TRUE;\n    pthread_mutex_unlock (&conn->pool->lock);\n}\n\n#define KEEPALIVE_INTERVAL 30\nstatic void *\nmysql_conn_keepalive (void *arg)\n{\n    DBConnPool *pool = arg;\n    DBConnection *conn = NULL;\n    DBConnection *d_conn = NULL;\n    char *sql = \"SELECT 1;\";\n    int rc = 0;\n    va_list args;\n\n    while (1) {\n        pthread_mutex_lock (&pool->lock);\n\n        guint i, size = pool->connections->len;\n        for (i = 0; i < size; ++i) {\n            conn = g_ptr_array_index (pool->connections, i);\n            if (conn->is_available) {\n                rc = db_ops.execute_sql (conn, sql, 0, args, NULL);\n                if (rc < 0) {\n                    conn->is_available = FALSE;\n                    conn->delete_pending = TRUE;\n       
         }\n            }\n        }\n\n        if (size > 0) {\n            int index;\n            for (index = size - 1; index >= 0; index--) {\n                d_conn = g_ptr_array_index (pool->connections, index);\n                if (d_conn->delete_pending) {\n                    g_ptr_array_remove (pool->connections, d_conn);\n                    mysql_db_release_connection (d_conn);\n                }\n            }\n        }\n\n        pthread_mutex_unlock (&pool->lock);\n\n        sleep (KEEPALIVE_INTERVAL);\n    }\n\n    return NULL;\n}\n\nSeafDB *\nseaf_db_new_mysql (const char *host,\n                   int port,\n                   const char *user, \n                   const char *passwd,\n                   const char *db_name,\n                   const char *unix_socket,\n                   gboolean use_ssl,\n                   gboolean skip_verify,\n                   const char *ca_path,\n                   const char *charset,\n                   int max_connections)\n{\n    SeafDB *db;\n\n    db = mysql_db_new (host, port, user, passwd, db_name, unix_socket, use_ssl, skip_verify, ca_path, charset);\n    if (!db)\n        return NULL;\n    db->type = SEAF_DB_TYPE_MYSQL;\n\n    db_ops.get_connection = mysql_conn_pool_get_connection;\n    db_ops.release_connection = mysql_conn_pool_release_connection;\n    db_ops.execute_sql_no_stmt = mysql_db_execute_sql_no_stmt;\n    db_ops.execute_sql = mysql_db_execute_sql;\n    db_ops.query_foreach_row = mysql_db_query_foreach_row;\n    db_ops.row_get_column_count = mysql_db_row_get_column_count;\n    db_ops.row_get_column_string = mysql_db_row_get_column_string;\n    db_ops.row_get_column_int = mysql_db_row_get_column_int;\n    db_ops.row_get_column_int64 = mysql_db_row_get_column_int64;\n\n    db->pool = init_conn_pool_common (max_connections);\n\n    pthread_t tid;\n    int ret = pthread_create (&tid, NULL, mysql_conn_keepalive, db->pool);\n    if (ret != 0) {\n        seaf_warning (\"Failed to create 
mysql connection keepalive thread.\\n\");\n        return NULL;\n    }\n    pthread_detach (tid);\n\n    return db;\n}\n\n#endif\n\n/* SQLite Ops */\nstatic SeafDB *\nsqlite_db_new (const char *db_path);\nstatic DBConnection *\nsqlite_db_get_connection (SeafDB *db);\nstatic void\nsqlite_db_release_connection (DBConnection *vconn, gboolean need_close);\nstatic int\nsqlite_db_execute_sql_no_stmt (DBConnection *vconn, const char *sql, gboolean *retry);\nstatic int\nsqlite_db_execute_sql (DBConnection *vconn, const char *sql, int n, va_list args, gboolean *retry);\nstatic int\nsqlite_db_query_foreach_row (DBConnection *vconn, const char *sql,\n                             SeafDBRowFunc callback, void *data,\n                             int n, va_list args, gboolean *retry);\nstatic int\nsqlite_db_row_get_column_count (SeafDBRow *row);\nstatic const char *\nsqlite_db_row_get_column_string (SeafDBRow *row, int idx);\nstatic int\nsqlite_db_row_get_column_int (SeafDBRow *row, int idx);\nstatic gint64\nsqlite_db_row_get_column_int64 (SeafDBRow *row, int idx);\n\nSeafDB *\nseaf_db_new_sqlite (const char *db_path, int max_connections)\n{\n    SeafDB *db;\n\n    db = sqlite_db_new (db_path);\n    if (!db)\n        return NULL;\n    db->type = SEAF_DB_TYPE_SQLITE;\n\n    db_ops.get_connection = sqlite_db_get_connection;\n    db_ops.release_connection = sqlite_db_release_connection;\n    db_ops.execute_sql_no_stmt = sqlite_db_execute_sql_no_stmt;\n    db_ops.execute_sql = sqlite_db_execute_sql;\n    db_ops.query_foreach_row = sqlite_db_query_foreach_row;\n    db_ops.row_get_column_count = sqlite_db_row_get_column_count;\n    db_ops.row_get_column_string = sqlite_db_row_get_column_string;\n    db_ops.row_get_column_int = sqlite_db_row_get_column_int;\n    db_ops.row_get_column_int64 = sqlite_db_row_get_column_int64;\n\n    return db;\n}\n\nint\nseaf_db_type (SeafDB *db)\n{\n    return db->type;\n}\n\nint\nseaf_db_query (SeafDB *db, const char *sql)\n{\n    int ret = -1; \n    
int retry_count = 0;\n\n    while (ret < 0) {\n        gboolean retry = FALSE;\n        DBConnection *conn = db_ops.get_connection (db);\n        if (!conn)\n            return -1;\n\n        ret = db_ops.execute_sql_no_stmt (conn, sql, &retry);\n\n        db_ops.release_connection (conn, ret < 0);\n\n        if (!retry || retry_count >= 3) {\n            break;\n        }\n        retry_count++;\n        seaf_warning (\"The mysql connection has expired, creating a new connection to re-query.\\n\");\n    }\n\n    return ret;\n}\n\ngboolean\nseaf_db_check_for_existence (SeafDB *db, const char *sql, gboolean *db_err)\n{\n    return seaf_db_statement_exists (db, sql, db_err, 0);\n}\n\nint\nseaf_db_foreach_selected_row (SeafDB *db, const char *sql, \n                              SeafDBRowFunc callback, void *data)\n{\n    return seaf_db_statement_foreach_row (db, sql, callback, data, 0);\n}\n\nconst char *\nseaf_db_row_get_column_text (SeafDBRow *row, guint32 idx)\n{\n    g_return_val_if_fail (idx < db_ops.row_get_column_count(row), NULL);\n\n    return db_ops.row_get_column_string (row, idx);\n}\n\nint\nseaf_db_row_get_column_int (SeafDBRow *row, guint32 idx)\n{\n    g_return_val_if_fail (idx < db_ops.row_get_column_count(row), -1);\n\n    return db_ops.row_get_column_int (row, idx);\n}\n\ngint64\nseaf_db_row_get_column_int64 (SeafDBRow *row, guint32 idx)\n{\n    g_return_val_if_fail (idx < db_ops.row_get_column_count(row), -1);\n\n    return db_ops.row_get_column_int64 (row, idx);\n}\n\nint\nseaf_db_get_int (SeafDB *db, const char *sql)\n{\n    return seaf_db_statement_get_int (db, sql, 0);\n}\n\ngint64\nseaf_db_get_int64 (SeafDB *db, const char *sql)\n{\n    return seaf_db_statement_get_int64 (db, sql, 0);\n}\n\nchar *\nseaf_db_get_string (SeafDB *db, const char *sql)\n{\n    return seaf_db_statement_get_string (db, sql, 0);\n}\n\nint\nseaf_db_statement_query (SeafDB *db, const char *sql, int n, ...)\n{\n    int ret = -1;\n    int retry_count = 0;\n\n    while (ret 
< 0) {\n        gboolean retry = FALSE;\n        DBConnection *conn = db_ops.get_connection (db);\n        if (!conn)\n            return -1;\n\n        va_list args;\n        va_start (args, n);\n        ret = db_ops.execute_sql (conn, sql, n, args, &retry);\n        va_end (args);\n\n        db_ops.release_connection (conn, ret < 0);\n\n        if (!retry || retry_count >= 3) {\n            break;\n        }\n        retry_count++;\n        seaf_warning (\"The mysql connection has expired, creating a new connection to re-query.\\n\");\n    }\n\n    return ret;\n}\n\ngboolean\nseaf_db_statement_exists (SeafDB *db, const char *sql, gboolean *db_err, int n, ...)\n{\n    int n_rows = -1;\n    int retry_count = 0;\n\n    while (n_rows < 0) {\n        gboolean retry = FALSE;\n        DBConnection *conn = db_ops.get_connection(db);\n        if (!conn) {\n            *db_err = TRUE;\n            return FALSE;\n        }\n\n        va_list args;\n        va_start (args, n);\n        n_rows = db_ops.query_foreach_row (conn, sql, NULL, NULL, n, args, &retry);\n        va_end (args);\n\n        db_ops.release_connection(conn, n_rows < 0);\n\n        if (!retry || retry_count >= 3) {\n            break;\n        }\n        retry_count++;\n        seaf_warning (\"The mysql connection has expired, creating a new connection to re-query.\\n\");\n    }\n\n    if (n_rows < 0) {\n        *db_err = TRUE;\n        return FALSE;\n    } else {\n        *db_err = FALSE;\n        return (n_rows != 0);\n    }\n}\n\nint\nseaf_db_statement_foreach_row (SeafDB *db, const char *sql,\n                               SeafDBRowFunc callback, void *data,\n                               int n, ...)\n{\n    int ret = -1;\n    int retry_count = 0;\n\n    while (ret < 0) {\n        gboolean retry = FALSE;\n        DBConnection *conn = db_ops.get_connection (db);\n        if (!conn)\n            return -1;\n\n        va_list args;\n        va_start (args, n);\n        ret = db_ops.query_foreach_row 
(conn, sql, callback, data, n, args, &retry);\n        va_end (args);\n\n        db_ops.release_connection (conn, ret < 0);\n\n        if (!retry || retry_count >= 3) {\n            break;\n        }\n        retry_count++;\n        seaf_warning (\"The mysql connection has expired, creating a new connection to re-query.\\n\");\n    }\n\n    return ret;\n}\n\nstatic gboolean\nget_int_cb (SeafDBRow *row, void *data)\n{\n    int *pret = (int*)data;\n\n    *pret = seaf_db_row_get_column_int (row, 0);\n\n    return FALSE;\n}\n\nint\nseaf_db_statement_get_int (SeafDB *db, const char *sql, int n, ...)\n{\n    int ret = -1;\n    int rc = -1;\n    int retry_count = 0;\n\n    while (rc < 0) {\n        gboolean retry = FALSE;\n        DBConnection *conn = db_ops.get_connection (db);\n        if (!conn)\n            return -1;\n\n        va_list args;\n        va_start (args, n);\n        rc = db_ops.query_foreach_row (conn, sql, get_int_cb, &ret, n, args, &retry);\n        va_end (args);\n\n        db_ops.release_connection (conn, rc < 0);\n\n        if (!retry || retry_count >= 3) {\n            break;\n        }\n        retry_count++;\n        seaf_warning (\"The mysql connection has expired, creating a new connection to re-query.\\n\");\n    }\n\n    return ret;\n}\n\nstatic gboolean\nget_int64_cb (SeafDBRow *row, void *data)\n{\n    gint64 *pret = (gint64*)data;\n\n    *pret = seaf_db_row_get_column_int64 (row, 0);\n\n    return FALSE;\n}\n\ngint64\nseaf_db_statement_get_int64 (SeafDB *db, const char *sql, int n, ...)\n{\n    gint64 ret = -1;\n    int rc = -1;\n    int retry_count = 0;\n\n    while (rc < 0) {\n        gboolean retry = FALSE;\n        DBConnection *conn = db_ops.get_connection (db);\n        if (!conn)\n            return -1;\n\n        va_list args;\n        va_start (args, n);\n        rc = db_ops.query_foreach_row (conn, sql, get_int64_cb, &ret, n, args, &retry);\n        va_end(args);\n\n        db_ops.release_connection (conn, rc < 0);\n\n        if 
(!retry || retry_count >= 3) {\n            break;\n        }\n        retry_count++;\n        seaf_warning (\"The mysql connection has expired, creating a new connection to re-query.\\n\");\n    }\n\n    return ret;\n}\n\nstatic gboolean\nget_string_cb (SeafDBRow *row, void *data)\n{\n    char **pret = (char**)data;\n\n    *pret = g_strdup(seaf_db_row_get_column_text (row, 0));\n\n    return FALSE;\n}\n\nchar *\nseaf_db_statement_get_string (SeafDB *db, const char *sql, int n, ...)\n{\n    char *ret = NULL;\n    int rc = -1;\n    int retry_count = 0;\n\n    while (rc < 0) {\n        gboolean retry = FALSE;\n        DBConnection *conn = db_ops.get_connection (db);\n        if (!conn)\n            return NULL;\n\n        va_list args;\n        va_start (args, n);\n        rc = db_ops.query_foreach_row (conn, sql, get_string_cb, &ret, n, args, &retry);\n        va_end(args);\n\n        db_ops.release_connection (conn, rc < 0);\n\n        if (!retry || retry_count >= 3) {\n            break;\n        }\n        retry_count++;\n        seaf_warning (\"The mysql connection has expired, creating a new connection to re-query.\\n\");\n    }\n\n    return ret;\n}\n\n/* Transaction */\n\nSeafDBTrans *\nseaf_db_begin_transaction (SeafDB *db)\n{\n    SeafDBTrans *trans = NULL;\n    DBConnection *conn = db_ops.get_connection(db);\n    if (!conn) {\n        return trans;\n    }\n\n    if (db_ops.execute_sql_no_stmt (conn, \"BEGIN\", NULL) < 0) {\n        db_ops.release_connection (conn, TRUE);\n        return trans;\n    }\n\n    trans = g_new0 (SeafDBTrans, 1);\n    trans->conn = conn;\n\n    return trans;\n}\n\nvoid\nseaf_db_trans_close (SeafDBTrans *trans)\n{\n    db_ops.release_connection (trans->conn, trans->need_close);\n    g_free (trans);\n}\n\nint\nseaf_db_commit (SeafDBTrans *trans)\n{\n    DBConnection *conn = trans->conn;\n\n    if (db_ops.execute_sql_no_stmt (conn, \"COMMIT\", NULL) < 0) {\n        trans->need_close = TRUE;\n        return -1;\n    }\n\n    return 
0;\n}\n\nint\nseaf_db_rollback (SeafDBTrans *trans)\n{\n    DBConnection *conn = trans->conn;\n\n    if (db_ops.execute_sql_no_stmt (conn, \"ROLLBACK\", NULL) < 0) {\n        trans->need_close = TRUE;\n        return -1;\n    }\n\n    return 0;\n}\n\nint\nseaf_db_trans_query (SeafDBTrans *trans, const char *sql, int n, ...)\n{\n    int ret;\n\n    va_list args;\n    va_start (args, n);\n    ret = db_ops.execute_sql (trans->conn, sql, n, args, NULL);\n    va_end (args);\n\n    if (ret < 0)\n        trans->need_close = TRUE;\n\n    return ret;\n}\n\ngboolean\nseaf_db_trans_check_for_existence (SeafDBTrans *trans,\n                                   const char *sql,\n                                   gboolean *db_err,\n                                   int n, ...)\n{\n    int n_rows;\n\n    va_list args;\n    va_start (args, n);\n    n_rows = db_ops.query_foreach_row (trans->conn, sql, NULL, NULL, n, args, NULL);\n    va_end (args);\n\n    if (n_rows < 0) {\n        trans->need_close = TRUE;\n        *db_err = TRUE;\n        return FALSE;\n    } else {\n        *db_err = FALSE;\n        return (n_rows != 0);\n    }\n}\n\nint\nseaf_db_trans_foreach_selected_row (SeafDBTrans *trans, const char *sql, \n                                    SeafDBRowFunc callback, void *data,\n                                    int n, ...)\n{\n    int ret;\n\n    va_list args;\n    va_start (args, n);\n    ret = db_ops.query_foreach_row (trans->conn, sql, callback, data, n, args, NULL);\n    va_end (args);\n\n    if (ret < 0)\n        trans->need_close = TRUE;\n\n    return ret;\n}\n\nint\nseaf_db_row_get_column_count (SeafDBRow *row)\n{\n    return db_ops.row_get_column_count(row);\n}\n\n#ifdef HAVE_MYSQL\n\n/* MySQL DB */\n\ntypedef struct MySQLDB {\n    struct SeafDB parent;\n    char *host;\n    char *user;\n    char *password;\n    unsigned int port;\n    char *db_name;\n    char *unix_socket;\n    gboolean use_ssl;\n    gboolean skip_verify;\n    char *ca_path;\n    char 
*charset;\n} MySQLDB;\n\ntypedef struct MySQLDBConnection {\n    struct DBConnection parent;\n    MYSQL *db_conn;\n} MySQLDBConnection;\n\nstatic gboolean\nmysql_db_connection_ping (DBConnection *vconn)\n{\n    MySQLDBConnection *conn = (MySQLDBConnection *)vconn;\n\n    return (mysql_ping (conn->db_conn) == 0);\n}\n\nstatic SeafDB *\nmysql_db_new (const char *host,\n              int port,\n              const char *user,\n              const char *password,\n              const char *db_name,\n              const char *unix_socket,\n              gboolean use_ssl,\n              gboolean skip_verify,\n              const char *ca_path,\n              const char *charset)\n{\n    MySQLDB *db = g_new0 (MySQLDB, 1);\n\n    db->host = g_strdup (host);\n    db->user = g_strdup (user);\n    db->password = g_strdup (password);\n    db->port = port;\n    db->db_name = g_strdup(db_name);\n    db->unix_socket = g_strdup(unix_socket);\n    db->use_ssl = use_ssl;\n    db->skip_verify = skip_verify;\n    db->ca_path = g_strdup(ca_path);\n    db->charset = g_strdup(charset);\n\n    mysql_library_init (0, NULL, NULL);\n\n    return (SeafDB *)db;\n}\n\ntypedef char my_bool;\n\nstatic DBConnection *\nmysql_db_get_connection (SeafDB *vdb)\n{\n    MySQLDB *db = (MySQLDB *)vdb;\n    int conn_timeout = 1;\n    int read_write_timeout = 60;\n    MYSQL *db_conn;\n    MySQLDBConnection *conn = NULL;\n    int ssl_mode;\n\n    db_conn = mysql_init (NULL);\n    if (!db_conn) {\n        seaf_warning (\"Failed to init mysql connection object.\\n\");\n        return NULL;\n    }\n\n    if (db->use_ssl && !db->skip_verify) {\n#ifndef LIBMARIADB\n        // Set ssl_mode to SSL_MODE_VERIFY_IDENTITY to verify server cert.\n        // When ssl_mode is set to SSL_MODE_VERIFY_IDENTITY, MYSQL_OPT_SSL_CA is required to verify server cert.\n        // Refer to: https://dev.mysql.com/doc/c-api/5.7/en/mysql-options.html\n        ssl_mode = SSL_MODE_VERIFY_IDENTITY;\n        mysql_options(db_conn, 
MYSQL_OPT_SSL_MODE, &ssl_mode);\n        mysql_options(db_conn, MYSQL_OPT_SSL_CA, db->ca_path);\n#else\n        static my_bool verify= 1;\n        mysql_optionsv(db_conn, MYSQL_OPT_SSL_VERIFY_SERVER_CERT, (void *)&verify);\n        mysql_options(db_conn, MYSQL_OPT_SSL_CA, db->ca_path);\n#endif\n    } else if (db->use_ssl && db->skip_verify) {\n#ifndef LIBMARIADB\n        // Set ssl_mode to SSL_MODE_PREFERRED to skip verify server cert.\n        ssl_mode = SSL_MODE_PREFERRED;\n        mysql_options(db_conn, MYSQL_OPT_SSL_MODE, &ssl_mode);\n#else\n        static my_bool verify= 0;\n        mysql_optionsv(db_conn, MYSQL_OPT_SSL_VERIFY_SERVER_CERT, (void *)&verify);\n#endif\n    } else {\n#ifdef LIBMARIADB\n        static my_bool verify= 0;\n        mysql_optionsv(db_conn, MYSQL_OPT_SSL_VERIFY_SERVER_CERT, (void *)&verify);\n#endif\n    }\n\n    if (db->charset)\n        mysql_options(db_conn, MYSQL_SET_CHARSET_NAME, db->charset);\n\n    if (db->unix_socket) {\n        int pro_type = MYSQL_PROTOCOL_SOCKET;\n        mysql_options (db_conn, MYSQL_OPT_PROTOCOL, &pro_type);\n        if (!db->user) {\n#ifndef LIBMARIADB\n           mysql_options (db_conn, MYSQL_DEFAULT_AUTH, \"unix_socket\");\n#else\n           mysql_options (db_conn, MARIADB_OPT_UNIXSOCKET, (void *)db->unix_socket);\n#endif\n        }\n    }\n\n    mysql_options(db_conn, MYSQL_OPT_CONNECT_TIMEOUT, (const char*)&conn_timeout);\n    mysql_options(db_conn, MYSQL_OPT_READ_TIMEOUT, (const char*)&read_write_timeout);\n    mysql_options(db_conn, MYSQL_OPT_WRITE_TIMEOUT, (const char*)&read_write_timeout);\n\n    if (!mysql_real_connect(db_conn, db->host, db->user, db->password,\n                            db->db_name, db->port,\n                            db->unix_socket, CLIENT_MULTI_STATEMENTS)) {\n        seaf_warning (\"Failed to connect to MySQL: %s\\n\", mysql_error(db_conn));\n        mysql_close (db_conn);\n        return NULL;\n    }\n\n    conn = g_new0 (MySQLDBConnection, 1);\n    conn->db_conn = 
db_conn;\n\n    return (DBConnection *)conn;\n}\n\nstatic void\nmysql_db_release_connection (DBConnection *vconn)\n{\n    if (!vconn)\n        return;\n\n    MySQLDBConnection *conn = (MySQLDBConnection *)vconn;\n\n    mysql_close (conn->db_conn);\n\n    g_free (conn);\n}\n\nstatic int\nmysql_db_execute_sql_no_stmt (DBConnection *vconn, const char *sql, gboolean *retry)\n{\n    MySQLDBConnection *conn = (MySQLDBConnection *)vconn;\n    int rc;\n\n    rc = mysql_query (conn->db_conn, sql);\n    if (rc == 0) {\n        return 0;\n    }\n\n    if (rc == CR_SERVER_GONE_ERROR || rc == CR_SERVER_LOST) {\n        if (retry)\n            *retry = TRUE;\n    }\n\n    seaf_warning (\"Failed to execute sql %s: %s\\n\", sql, mysql_error(conn->db_conn));\n    return -1;\n}\n\nstatic MYSQL_STMT *\n_prepare_stmt_mysql (MYSQL *db, const char *sql, gboolean *retry)\n{\n    MYSQL_STMT *stmt;\n\n    stmt = mysql_stmt_init (db);\n    if (!stmt) {\n        seaf_warning (\"mysql_stmt_init failed.\\n\");\n        return NULL;\n    }\n\n    if (mysql_stmt_prepare (stmt, sql, strlen(sql)) != 0) {\n        int err_code = mysql_stmt_errno (stmt);\n        if (err_code == CR_SERVER_GONE_ERROR || err_code == CR_SERVER_LOST) {\n            if (retry)\n                *retry = TRUE;\n        }\n        seaf_warning (\"Failed to prepare sql %s: %s\\n\", sql, mysql_stmt_error(stmt));\n        mysql_stmt_close (stmt);\n        return NULL;\n    }\n\n    return stmt;\n}\n\nstatic int\n_bind_params_mysql (MYSQL_STMT *stmt, MYSQL_BIND *params, int n, va_list args)\n{\n    int i;\n    const char *type;\n\n    for (i = 0; i < n; ++i) {\n        type = va_arg (args, const char *);\n        if (strcmp(type, \"int\") == 0) {\n            int x = va_arg (args, int);\n            int *pval = g_new (int, 1);\n            *pval = x;\n            params[i].buffer_type = MYSQL_TYPE_LONG;\n            params[i].buffer = pval;\n            params[i].is_null = 0;\n        } else if (strcmp (type, \"int64\") == 0) 
{\n            gint64 x = va_arg (args, gint64);\n            gint64 *pval = g_new (gint64, 1);\n            *pval = x;\n            params[i].buffer_type = MYSQL_TYPE_LONGLONG;\n            params[i].buffer = pval;\n            params[i].is_null = 0;\n        } else if (strcmp (type, \"string\") == 0) {\n            const char *s = va_arg (args, const char *);\n            static my_bool yes = TRUE;\n            params[i].buffer_type = MYSQL_TYPE_STRING;\n            params[i].buffer = g_strdup(s);\n            unsigned long *plen = g_new (unsigned long, 1);\n            params[i].length = plen;\n            if (!s) {\n                *plen = 0;\n                params[i].buffer_length = 0;\n                params[i].is_null = &yes;\n            } else {\n                *plen = strlen(s);\n                params[i].buffer_length = *plen + 1;\n                params[i].is_null = 0;\n            }\n        } else {\n            seaf_warning (\"BUG: invalid prep stmt parameter type %s.\\n\", type);\n            g_return_val_if_reached (-1);\n        }\n    }\n\n    if (mysql_stmt_bind_param (stmt, params) != 0) {\n        return -1;\n    }\n\n    return 0;\n}\n\nstatic int\nmysql_db_execute_sql (DBConnection *vconn, const char *sql, int n, va_list args, gboolean *retry)\n{\n    MySQLDBConnection *conn = (MySQLDBConnection *)vconn;\n    MYSQL *db = conn->db_conn;\n    MYSQL_STMT *stmt = NULL;\n    MYSQL_BIND *params = NULL;\n    int ret = 0;\n\n    stmt = _prepare_stmt_mysql (db, sql, retry);\n    if (!stmt) {\n        return -1;\n    }\n\n    if (n > 0) {\n        params = g_new0 (MYSQL_BIND, n);\n        if (_bind_params_mysql (stmt, params, n, args) < 0) {\n            seaf_warning (\"Failed to bind parameters for %s: %s.\\n\",\n                          sql, mysql_stmt_error(stmt));\n            ret = -1;\n            goto out;\n        }\n    }\n\n    if (mysql_stmt_execute (stmt) != 0) {\n        seaf_warning (\"Failed to execute sql %s: %s\\n\", sql, 
mysql_stmt_error(stmt));\n        ret = -1;\n        goto out;\n    }\n\nout:\n    if (ret < 0) {\n        int err_code = mysql_stmt_errno (stmt);\n        if (err_code == CR_SERVER_GONE_ERROR || err_code == CR_SERVER_LOST) {\n            if (retry)\n                *retry = TRUE;\n        }\n    }\n    if (stmt)\n        mysql_stmt_close (stmt);\n    if (params) {\n        int i;\n        for (i = 0; i < n; ++i) {\n            g_free (params[i].buffer);\n            g_free (params[i].length);\n        }\n        g_free (params);\n    }\n    return ret;\n}\n\ntypedef struct MySQLDBRow {\n    SeafDBRow parent;\n    int column_count;\n    MYSQL_STMT *stmt;\n    MYSQL_BIND *results;\n    /* Used when returned columns are truncated. */\n    MYSQL_BIND *new_binds;\n} MySQLDBRow;\n\n#define DEFAULT_MYSQL_COLUMN_SIZE 1024\n\nstatic int\nmysql_db_query_foreach_row (DBConnection *vconn, const char *sql,\n                            SeafDBRowFunc callback, void *data,\n                            int n, va_list args, gboolean *retry)\n{\n    MySQLDBConnection *conn = (MySQLDBConnection *)vconn;\n    MYSQL *db = conn->db_conn;\n    MYSQL_STMT *stmt = NULL;\n    MYSQL_BIND *params = NULL;\n    MySQLDBRow row;\n    int err_code;\n    int nrows = 0;\n    int i;\n\n    memset (&row, 0, sizeof(row));\n\n    stmt = _prepare_stmt_mysql (db, sql, retry);\n    if (!stmt) {\n        return -1;\n    }\n\n    if (n > 0) {\n        params = g_new0 (MYSQL_BIND, n);\n        if (_bind_params_mysql (stmt, params, n, args) < 0) {\n            nrows = -1;\n            err_code = mysql_stmt_errno (stmt);\n            if (err_code == CR_SERVER_GONE_ERROR || err_code == CR_SERVER_LOST) {\n                if (retry)\n                    *retry = TRUE;\n            }\n            goto out;\n        }\n    }\n\n    if (mysql_stmt_execute (stmt) != 0) {\n        seaf_warning (\"Failed to execute sql %s: %s\\n\", sql, mysql_stmt_error(stmt));\n        nrows = -1;\n        err_code = mysql_stmt_errno 
(stmt);\n        if (err_code == CR_SERVER_GONE_ERROR || err_code == CR_SERVER_LOST) {\n            if (retry)\n                *retry = TRUE;\n        }\n        goto out;\n    }\n\n    row.column_count = mysql_stmt_field_count (stmt);\n    row.stmt = stmt;\n    row.results = g_new0 (MYSQL_BIND, row.column_count);\n    for (i = 0; i < row.column_count; ++i) {\n        row.results[i].buffer = g_malloc (DEFAULT_MYSQL_COLUMN_SIZE + 1);\n        /* Ask MySQL to convert fields to string, to avoid the trouble of\n         * checking field types.\n         */\n        row.results[i].buffer_type = MYSQL_TYPE_STRING;\n        row.results[i].buffer_length = DEFAULT_MYSQL_COLUMN_SIZE;\n        row.results[i].length = g_new0 (unsigned long, 1);\n        row.results[i].is_null = g_new0 (my_bool, 1);\n    }\n    row.new_binds = g_new0 (MYSQL_BIND, row.column_count);\n\n    if (mysql_stmt_bind_result (stmt, row.results) != 0) {\n        seaf_warning (\"Failed to bind result for sql %s: %s\\n\", sql, mysql_stmt_error(stmt));\n        nrows = -1;\n        err_code = mysql_stmt_errno (stmt);\n        if (err_code == CR_SERVER_GONE_ERROR || err_code == CR_SERVER_LOST) {\n            if (retry)\n                *retry = TRUE;\n        }\n        goto out;\n    }\n\n    int rc;\n    gboolean next_row = TRUE;\n    while (1) {\n        rc = mysql_stmt_fetch (stmt);\n        if (rc == 1) {\n            seaf_warning (\"Failed to fetch result for sql %s: %s\\n\",\n                          sql, mysql_stmt_error(stmt));\n            nrows = -1;\n            // Don't need to retry, some rows may have been fetched.\n            goto out;\n        }\n        if (rc == MYSQL_NO_DATA)\n            break;\n\n        /* rc == 0 or rc == MYSQL_DATA_TRUNCATED */\n\n        ++nrows;\n        if (callback)\n            next_row = callback ((SeafDBRow *)&row, data);\n\n        for (i = 0; i < row.column_count; ++i) {\n            g_free (row.new_binds[i].buffer);\n            g_free 
(row.new_binds[i].length);\n            g_free (row.new_binds[i].is_null);\n            memset (&row.new_binds[i], 0, sizeof(MYSQL_BIND));\n        }\n\n        if (!next_row)\n            break;\n    }\n\nout:\n    if (stmt) {\n        mysql_stmt_free_result (stmt);\n        mysql_stmt_close (stmt);\n    }\n    if (params) {\n        for (i = 0; i < n; ++i) {\n            g_free (params[i].buffer);\n            g_free (params[i].length);\n        }\n        g_free (params);\n    }\n    if (row.results) {\n        for (i = 0; i < row.column_count; ++i) {\n            g_free (row.results[i].buffer);\n            g_free (row.results[i].length);\n            g_free (row.results[i].is_null);\n        }\n        g_free (row.results);\n    }\n    if (row.new_binds) {\n        for (i = 0; i < row.column_count; ++i) {\n            g_free (row.new_binds[i].buffer);\n            g_free (row.new_binds[i].length);\n            g_free (row.new_binds[i].is_null);\n        }\n        g_free (row.new_binds);\n    }\n    return nrows;\n}\n\nstatic int\nmysql_db_row_get_column_count (SeafDBRow *vrow)\n{\n    MySQLDBRow *row = (MySQLDBRow *)vrow;\n    return row->column_count;\n}\n\nstatic const char *\nmysql_db_row_get_column_string (SeafDBRow *vrow, int i)\n{\n    MySQLDBRow *row = (MySQLDBRow *)vrow;\n\n    if (*(row->results[i].is_null)) {\n        return NULL;\n    }\n\n    char *ret = NULL;\n    unsigned long real_length = *(row->results[i].length);\n    /* If column size is larger then allocated buffer size, re-allocate a new buffer\n     * and fetch the column directly.\n     */\n    if (real_length > row->results[i].buffer_length) {\n        row->new_binds[i].buffer = g_malloc (real_length + 1);\n        row->new_binds[i].buffer_type = MYSQL_TYPE_STRING;\n        row->new_binds[i].buffer_length = real_length;\n        row->new_binds[i].length = g_new0 (unsigned long, 1);\n        row->new_binds[i].is_null = g_new0 (my_bool, 1);\n        if (mysql_stmt_fetch_column 
(row->stmt, &row->new_binds[i], i, 0) != 0) {\n            seaf_warning (\"Failed to fetch column: %s\\n\", mysql_stmt_error(row->stmt));\n            return NULL;\n        }\n\n        ret = row->new_binds[i].buffer;\n    } else {\n        ret = row->results[i].buffer;\n    }\n    ret[real_length] = 0;\n\n    return ret;\n}\n\nstatic int\nmysql_db_row_get_column_int (SeafDBRow *vrow, int idx)\n{\n    const char *str;\n    char *e;\n    int ret;\n\n    str = mysql_db_row_get_column_string (vrow, idx);\n    if (!str) {\n        return 0;\n    }\n\n    errno = 0;\n    ret = strtol (str, &e, 10);\n    if (errno || (e == str)) {\n        seaf_warning (\"Number conversion failed.\\n\");\n        return -1;\n    }\n\n    return ret;\n}\n\nstatic gint64\nmysql_db_row_get_column_int64 (SeafDBRow *vrow, int idx)\n{\n    const char *str;\n    char *e;\n    gint64 ret;\n\n    str = mysql_db_row_get_column_string (vrow, idx);\n    if (!str) {\n        return 0;\n    }\n\n    errno = 0;\n    ret = strtoll (str, &e, 10);\n    if (errno || (e == str)) {\n        seaf_warning (\"Number conversion failed.\\n\");\n        return -1;\n    }\n\n    return ret;\n}\n\n#endif  /* HAVE_MYSQL */\n\n/* SQLite DB */\n\n/* SQLite thread synchronization routines.\n * See https://www.sqlite.org/unlock_notify.html\n */\n\ntypedef struct UnlockNotification {\n        int fired;\n        pthread_cond_t cond;\n        pthread_mutex_t mutex;\n} UnlockNotification;\n\nstatic void\nunlock_notify_cb(void **ap_arg, int n_arg)\n{\n    int i;\n\n    for (i = 0; i < n_arg; i++) {\n        UnlockNotification *p = (UnlockNotification *)ap_arg[i];\n        pthread_mutex_lock (&p->mutex);\n        p->fired = 1;\n        pthread_cond_signal (&p->cond);\n        pthread_mutex_unlock (&p->mutex);\n    }\n}\n\nstatic int\nwait_for_unlock_notify(sqlite3 *db)\n{\n    UnlockNotification un;\n    un.fired = 0;\n    pthread_mutex_init (&un.mutex, NULL);\n    pthread_cond_init (&un.cond, NULL);\n\n    int rc = 
sqlite3_unlock_notify(db, unlock_notify_cb, (void *)&un);\n\n    if (rc == SQLITE_OK) {\n        pthread_mutex_lock(&un.mutex);\n        if (!un.fired)\n            pthread_cond_wait (&un.cond, &un.mutex);\n        pthread_mutex_unlock(&un.mutex);\n    }\n\n    pthread_cond_destroy (&un.cond);\n    pthread_mutex_destroy (&un.mutex);\n\n    return rc;\n}\n\nstatic int\nsqlite3_blocking_step(sqlite3_stmt *stmt)\n{\n    int rc;\n    while (SQLITE_LOCKED == (rc = sqlite3_step(stmt))) {\n        rc = wait_for_unlock_notify(sqlite3_db_handle(stmt));\n        if (rc != SQLITE_OK)\n            break;\n        sqlite3_reset(stmt);\n    }\n    return rc;\n}\n\nstatic int\nsqlite3_blocking_prepare_v2(sqlite3 *db, const char *sql, int sql_len, sqlite3_stmt **pstmt, const char **pz)\n{\n    int rc;\n    while (SQLITE_LOCKED == (rc = sqlite3_prepare_v2(db, sql, sql_len, pstmt, pz))) {\n        rc = wait_for_unlock_notify(db);\n        if (rc != SQLITE_OK)\n            break;\n    }\n    return rc;\n}\n\nstatic int\nsqlite3_blocking_exec(sqlite3 *db, const char *sql, int (*callback)(void *, int, char **, char **), void *arg, char **errmsg)\n{\n    int rc;\n    while (SQLITE_LOCKED == (rc = sqlite3_exec(db, sql, callback, arg, errmsg))) {\n        rc = wait_for_unlock_notify(db);\n        if (rc != SQLITE_OK)\n            break;\n    }\n    return rc;\n}\n\ntypedef struct SQLiteDB {\n    SeafDB parent;\n    char *db_path;\n} SQLiteDB;\n\ntypedef struct SQLiteDBConnection {\n    DBConnection parent;\n    sqlite3 *db_conn;\n} SQLiteDBConnection;\n\nstatic SeafDB *\nsqlite_db_new (const char *db_path)\n{\n    SQLiteDB *db = g_new0 (SQLiteDB, 1);\n    db->db_path = g_strdup(db_path);\n\n    return (SeafDB *)db;\n}\n\nstatic DBConnection *\nsqlite_db_get_connection (SeafDB *vdb)\n{\n    SQLiteDB *db = (SQLiteDB *)vdb;\n    sqlite3 *db_conn;\n    int result;\n    const char *errmsg;\n    SQLiteDBConnection *conn;\n\n    result = sqlite3_open_v2 (db->db_path, &db_conn, 
SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_SHAREDCACHE, NULL);\n    if (result != SQLITE_OK) {\n        errmsg = sqlite3_errmsg(db_conn);\n        seaf_warning (\"Failed to open sqlite db: %s\\n\", errmsg ? errmsg : \"no error given\");\n        return NULL;\n    }\n\n    conn = g_new0 (SQLiteDBConnection, 1);\n    conn->db_conn = db_conn;\n\n    return (DBConnection *)conn;\n}\n\nstatic void\nsqlite_db_release_connection (DBConnection *vconn, gboolean need_close)\n{\n    if (!vconn)\n        return;\n\n    SQLiteDBConnection *conn = (SQLiteDBConnection *)vconn;\n\n    sqlite3_close (conn->db_conn);\n\n    g_free (conn);\n}\n\nstatic int\nsqlite_db_execute_sql_no_stmt (DBConnection *vconn, const char *sql, gboolean *retry)\n{\n    SQLiteDBConnection *conn = (SQLiteDBConnection *)vconn;\n    char *errmsg = NULL;\n    int rc;\n\n    rc = sqlite3_blocking_exec (conn->db_conn, sql, NULL, NULL, &errmsg);\n    if (rc != SQLITE_OK) {\n        seaf_warning (\"sqlite3_exec failed %s: %s\", sql, errmsg ? 
errmsg : \"no error given\");\n        if (errmsg)\n            sqlite3_free (errmsg);\n        return -1;\n    }\n\n    return 0;\n}\n\nstatic int\n_bind_parameters_sqlite (sqlite3 *db, sqlite3_stmt *stmt, int n, va_list args)\n{\n    int i;\n    const char *type;\n\n    for (i = 0; i < n; ++i) {\n        type = va_arg (args, const char *);\n        if (strcmp(type, \"int\") == 0) {\n            int x = va_arg (args, int);\n            if (sqlite3_bind_int (stmt, i+1, x) != SQLITE_OK) {\n                seaf_warning (\"sqlite3_bind_int failed: %s\\n\", sqlite3_errmsg(db));\n                return -1;\n            }\n        } else if (strcmp (type, \"int64\") == 0) {\n            gint64 x = va_arg (args, gint64);\n            if (sqlite3_bind_int64 (stmt, i+1, x) != SQLITE_OK) {\n                seaf_warning (\"sqlite3_bind_int64 failed: %s\\n\", sqlite3_errmsg(db));\n                return -1;\n            }\n        } else if (strcmp (type, \"string\") == 0) {\n            const char *s = va_arg (args, const char *);\n            if (sqlite3_bind_text (stmt, i+1, s, -1, SQLITE_TRANSIENT) != SQLITE_OK) {\n                seaf_warning (\"sqlite3_bind_text failed: %s\\n\", sqlite3_errmsg(db));\n                return -1;\n            }\n        } else {\n            seaf_warning (\"BUG: invalid prep stmt parameter type %s.\\n\", type);\n            g_return_val_if_reached (-1);\n        }\n    }\n\n    return 0;\n}\n\nstatic int\nsqlite_db_execute_sql (DBConnection *vconn, const char *sql, int n, va_list args, gboolean *retry)\n{\n    SQLiteDBConnection *conn = (SQLiteDBConnection *)vconn;\n    sqlite3 *db = conn->db_conn;\n    sqlite3_stmt *stmt;\n    int rc;\n    int ret = 0;\n\n    rc = sqlite3_blocking_prepare_v2 (db, sql, -1, &stmt, NULL);\n    if (rc != SQLITE_OK) {\n        seaf_warning (\"sqlite3_prepare_v2 failed %s: %s\", sql, sqlite3_errmsg(db));\n        return -1;\n    }\n\n    if (_bind_parameters_sqlite (db, stmt, n, args) < 0) {\n        
seaf_warning (\"Failed to bind parameters for sql %s\\n\", sql);\n        ret = -1;\n        goto out;\n    }\n\n    rc = sqlite3_blocking_step (stmt);\n    if (rc != SQLITE_DONE) {\n        seaf_warning (\"sqlite3_step failed %s: %s\", sql, sqlite3_errmsg(db));\n        ret = -1;\n        goto out;\n    }\n\nout:\n    sqlite3_finalize (stmt);\n    return ret;\n}\n\ntypedef struct SQLiteDBRow {\n    SeafDBRow parent;\n    int column_count;\n    sqlite3 *db;\n    sqlite3_stmt *stmt;\n} SQLiteDBRow;\n\nstatic int\nsqlite_db_query_foreach_row (DBConnection *vconn, const char *sql,\n                             SeafDBRowFunc callback, void *data,\n                             int n, va_list args, gboolean *retry)\n{\n    SQLiteDBConnection *conn = (SQLiteDBConnection *)vconn;\n    sqlite3 *db = conn->db_conn;\n    sqlite3_stmt *stmt;\n    int rc;\n    int nrows = 0;\n\n    rc = sqlite3_blocking_prepare_v2 (db, sql, -1, &stmt, NULL);\n    if (rc != SQLITE_OK) {\n        seaf_warning (\"sqlite3_prepare_v2 failed %s: %s\", sql, sqlite3_errmsg(db));\n        return -1;\n    }\n\n    if (_bind_parameters_sqlite (db, stmt, n, args) < 0) {\n        seaf_warning (\"Failed to bind parameters for sql %s\\n\", sql);\n        nrows = -1;\n        goto out;\n    }\n\n    SQLiteDBRow row;\n    memset (&row, 0, sizeof(row));\n    row.db = db;\n    row.stmt = stmt;\n    row.column_count = sqlite3_column_count (stmt);\n\n    while (1) {\n        rc = sqlite3_blocking_step (stmt);\n        if (rc == SQLITE_ROW) {\n            ++nrows;\n            if (callback && !callback ((SeafDBRow *)&row, data))\n                break;\n        } else if (rc == SQLITE_DONE) {\n            break;\n        } else {\n            seaf_warning (\"sqlite3_step failed %s: %s\\n\", sql, sqlite3_errmsg(db));\n            nrows = -1;\n            goto out;\n        }\n    }\n\nout:\n    sqlite3_finalize (stmt);\n    return nrows;\n}\n\nstatic int\nsqlite_db_row_get_column_count (SeafDBRow *vrow)\n{\n    
SQLiteDBRow *row = (SQLiteDBRow *)vrow;\n\n    return row->column_count;\n}\n\nstatic const char *\nsqlite_db_row_get_column_string (SeafDBRow *vrow, int idx)\n{\n    SQLiteDBRow *row = (SQLiteDBRow *)vrow;\n\n    return (const char *)sqlite3_column_text (row->stmt, idx);\n}\n\nstatic int\nsqlite_db_row_get_column_int (SeafDBRow *vrow, int idx)\n{\n    SQLiteDBRow *row = (SQLiteDBRow *)vrow;\n\n    return sqlite3_column_int (row->stmt, idx);\n}\n\nstatic gint64\nsqlite_db_row_get_column_int64 (SeafDBRow *vrow, int idx)\n{\n    SQLiteDBRow *row = (SQLiteDBRow *)vrow;\n\n    return sqlite3_column_int64 (row->stmt, idx);\n}\n"
  },
  {
    "path": "common/seaf-db.h",
    "content": "#ifndef SEAF_DB_H\n#define SEAF_DB_H\n\nenum {\n    SEAF_DB_TYPE_SQLITE,\n    SEAF_DB_TYPE_MYSQL,\n    SEAF_DB_TYPE_PGSQL,\n};\n\ntypedef struct SeafDB SeafDB;\ntypedef struct SeafDB CcnetDB;\ntypedef struct SeafDBRow SeafDBRow;\ntypedef struct SeafDBRow CcnetDBRow;\ntypedef struct SeafDBTrans SeafDBTrans;\ntypedef struct SeafDBTrans CcnetDBTrans;\n\ntypedef gboolean (*SeafDBRowFunc) (SeafDBRow *, void *);\ntypedef gboolean (*CcnetDBRowFunc) (CcnetDBRow *, void *);\n\nSeafDB *\nseaf_db_new_mysql (const char *host,\n                   int port,\n                   const char *user, \n                   const char *passwd,\n                   const char *db,\n                   const char *unix_socket,\n                   gboolean use_ssl,\n                   gboolean skip_verify,\n                   const char *ca_path,\n                   const char *charset,\n                   int max_connections);\n\n#if 0\nSeafDB *\nseaf_db_new_pgsql (const char *host,\n                   unsigned int port,\n                   const char *user,\n                   const char *passwd,\n                   const char *db_name,\n                   const char *unix_socket,\n                   int max_connections);\n#endif\n\nSeafDB *\nseaf_db_new_sqlite (const char *db_path, int max_connections);\n\nint\nseaf_db_type (SeafDB *db);\n\nint\nseaf_db_query (SeafDB *db, const char *sql);\n\ngboolean\nseaf_db_check_for_existence (SeafDB *db, const char *sql, gboolean *db_err);\n\nint\nseaf_db_foreach_selected_row (SeafDB *db, const char *sql, \n                              SeafDBRowFunc callback, void *data);\n\nconst char *\nseaf_db_row_get_column_text (SeafDBRow *row, guint32 idx);\n\nint\nseaf_db_row_get_column_int (SeafDBRow *row, guint32 idx);\n\ngint64\nseaf_db_row_get_column_int64 (SeafDBRow *row, guint32 idx);\n\nint\nseaf_db_get_int (SeafDB *db, const char *sql);\n\ngint64\nseaf_db_get_int64 (SeafDB *db, const char *sql);\n\nchar *\nseaf_db_get_string (SeafDB 
*db, const char *sql);\n\n/* Transaction related */\n\nSeafDBTrans *\nseaf_db_begin_transaction (SeafDB *db);\n\nvoid\nseaf_db_trans_close (SeafDBTrans *trans);\n\nint\nseaf_db_commit (SeafDBTrans *trans);\n\nint\nseaf_db_rollback (SeafDBTrans *trans);\n\nint\nseaf_db_trans_query (SeafDBTrans *trans, const char *sql, int n, ...);\n\ngboolean\nseaf_db_trans_check_for_existence (SeafDBTrans *trans,\n                                   const char *sql,\n                                   gboolean *db_err,\n                                   int n, ...);\n\nint\nseaf_db_trans_foreach_selected_row (SeafDBTrans *trans, const char *sql,\n                                    SeafDBRowFunc callback, void *data,\n                                    int n, ...);\n\nint\nseaf_db_row_get_column_count (SeafDBRow *row);\n\n/* Prepared Statements */\n\nint\nseaf_db_statement_query (SeafDB *db, const char *sql, int n, ...);\n\ngboolean\nseaf_db_statement_exists (SeafDB *db, const char *sql, gboolean *db_err, int n, ...);\n\nint\nseaf_db_statement_foreach_row (SeafDB *db, const char *sql,\n                                SeafDBRowFunc callback, void *data,\n                                int n, ...);\n\nint\nseaf_db_statement_get_int (SeafDB *db, const char *sql, int n, ...);\n\ngint64\nseaf_db_statement_get_int64 (SeafDB *db, const char *sql, int n, ...);\n\nchar *\nseaf_db_statement_get_string (SeafDB *db, const char *sql, int n, ...);\n\n#endif\n"
  },
  {
    "path": "common/seaf-utils.c",
    "content": "#include \"common.h\"\n\n#include \"log.h\"\n\n#include \"seafile-session.h\"\n#include \"seaf-utils.h\"\n#include \"seaf-db.h\"\n#include \"utils.h\"\n\n#include <stdlib.h>\n#include <string.h>\n#include <ctype.h>\n#include <jwt.h>\n\n#define JWT_TOKEN_EXPIRE_TIME 3*24*3600 /* 3 days*/\n\nchar *\nseafile_session_get_tmp_file_path (SeafileSession *session,\n                                   const char *basename,\n                                   char path[])\n{\n    int path_len;\n\n    path_len = strlen (session->tmp_file_dir);\n    memcpy (path, session->tmp_file_dir, path_len + 1);\n    path[path_len] = '/';\n    strcpy (path + path_len + 1, basename);\n\n    return path;\n}\n\n#define DEFAULT_MAX_CONNECTIONS 100\n\n#define SQLITE_DB_NAME \"seafile.db\"\n#define CCNET_DB \"ccnet.db\"\n\nstatic int\nsqlite_db_start (SeafileSession *session)\n{\n    char *db_path;\n    int max_connections = 0;\n\n    max_connections = g_key_file_get_integer (session->config,\n                                              \"database\", \"max_connections\",\n                                              NULL);\n    if (max_connections <= 0)\n        max_connections = DEFAULT_MAX_CONNECTIONS;\n\n    db_path = g_build_filename (session->seaf_dir, SQLITE_DB_NAME, NULL);\n    session->db = seaf_db_new_sqlite (db_path, max_connections);\n    if (!session->db) {\n        seaf_warning (\"Failed to start sqlite db.\\n\");\n        return -1;\n    }\n\n    return 0;\n}\n\n#ifdef HAVE_MYSQL\n\n#define MYSQL_DEFAULT_PORT 3306\n\ntypedef struct DBOption {\n    char *user;\n    char *passwd;\n    char *host;\n    char *ca_path;\n    char *charset;\n    char *ccnet_db_name;\n    char *seafile_db_name;\n    gboolean use_ssl;\n    gboolean skip_verify;\n    int port;\n    int max_connections;\n} DBOption;\n\nstatic void\ndb_option_free (DBOption *option)\n{\n    if (!option)\n        return;\n    g_free (option->user);\n    g_free (option->passwd);\n    g_free (option->host);\n   
 g_free (option->ca_path);\n    g_free (option->charset);\n    g_free (option->ccnet_db_name);\n    g_free (option->seafile_db_name);\n    g_free (option);\n}\n\nstatic int\nload_db_option_from_env (DBOption *option)\n{\n    const char *env_user, *env_passwd, *env_host, *env_ccnet_db, *env_seafile_db, *env_port;\n\n    env_user = g_getenv(\"SEAFILE_MYSQL_DB_USER\");\n    env_passwd = g_getenv(\"SEAFILE_MYSQL_DB_PASSWORD\");\n    env_host = g_getenv(\"SEAFILE_MYSQL_DB_HOST\");\n    env_port = g_getenv(\"SEAFILE_MYSQL_DB_PORT\");\n    env_ccnet_db = g_getenv(\"SEAFILE_MYSQL_DB_CCNET_DB_NAME\");\n    env_seafile_db = g_getenv(\"SEAFILE_MYSQL_DB_SEAFILE_DB_NAME\");\n\n    if (env_user && g_strcmp0 (env_user, \"\") != 0) {\n        g_free (option->user);\n        option->user = g_strdup (env_user);\n    }\n    if (env_passwd && g_strcmp0 (env_passwd, \"\") != 0) {\n        g_free (option->passwd);\n        option->passwd = g_strdup (env_passwd);\n    }\n    if (env_host && g_strcmp0 (env_host, \"\") != 0) {\n        g_free (option->host);\n        option->host = g_strdup (env_host);\n    }\n    if (env_port && g_strcmp0(env_port, \"\") != 0) {\n        int port = atoi(env_port);\n        if (port > 0) {\n            option->port = port;\n        }\n    }\n    if (env_ccnet_db && g_strcmp0 (env_ccnet_db, \"\") != 0) {\n        g_free (option->ccnet_db_name);\n        option->ccnet_db_name = g_strdup (env_ccnet_db);\n    } else if (!option->ccnet_db_name) {\n        option->ccnet_db_name = g_strdup (\"ccnet_db\");\n        seaf_message (\"Failed to read SEAFILE_MYSQL_DB_CCNET_DB_NAME, use ccnet_db by default\\n\");\n    }\n    if (env_seafile_db && g_strcmp0 (env_seafile_db, \"\") != 0) {\n        g_free (option->seafile_db_name);\n        option->seafile_db_name = g_strdup (env_seafile_db);\n    } else if (!option->seafile_db_name) {\n        option->seafile_db_name = g_strdup (\"seafile_db\");\n\t\tseaf_message (\"Failed to read SEAFILE_MYSQL_DB_SEAFILE_DB_NAME, use 
seafile_db by default\\n\");\n    }\n\n    return 0;\n}\n\nstatic DBOption *\nload_db_option (SeafileSession *session)\n{\n    GError *error = NULL;\n    int ret = 0;\n    DBOption *option = g_new0 (DBOption, 1);\n\n    option->host = seaf_key_file_get_string (session->config, \"database\", \"host\", NULL);\n\n    option->port = g_key_file_get_integer (session->config, \"database\", \"port\", &error);\n    if (error) {\n        g_clear_error (&error);\n        option->port = MYSQL_DEFAULT_PORT;\n    }\n\n    option->user = seaf_key_file_get_string (session->config, \"database\", \"user\", NULL);\n\n    option->passwd = seaf_key_file_get_string (session->config, \"database\", \"password\", NULL);\n\n    option->seafile_db_name = seaf_key_file_get_string (session->config, \"database\", \"db_name\", NULL);\n\n    option->use_ssl = g_key_file_get_boolean (session->config,\n                                      \"database\", \"use_ssl\", NULL);\n\n    option->skip_verify = g_key_file_get_boolean (session->config,\n                                          \"database\", \"skip_verify\", NULL);\n\n    if (option->use_ssl && !option->skip_verify) {\n        option->ca_path = seaf_key_file_get_string (session->config,\n                                            \"database\", \"ca_path\", NULL);\n        if (!option->ca_path) {\n            seaf_warning (\"ca_path is required if use ssl and don't skip verify.\\n\");\n            ret = -1;\n            goto out;\n        }\n    }\n\n    option->charset = seaf_key_file_get_string (session->config,\n                                     \"database\", \"connection_charset\", NULL);\n\n    option->max_connections = g_key_file_get_integer (session->config,\n                                              \"database\", \"max_connections\",\n                                              &error);\n    if (error || option->max_connections < 0) {\n        if (error)\n            g_clear_error (&error);\n        option->max_connections = 
DEFAULT_MAX_CONNECTIONS;\n    }\n\n    load_db_option_from_env (option);\n\n    if (!option->host) {\n        seaf_warning (\"DB host not set in config.\\n\");\n        ret = -1;\n        goto out;\n    }\n\n    if (!option->user) {\n        seaf_warning (\"DB user not set in config.\\n\");\n        ret = -1;\n        goto out;\n    }\n\n    if (!option->passwd) {\n        seaf_warning (\"DB passwd not set in config.\\n\");\n        ret = -1;\n        goto out;\n    }\n\n    if (!option->ccnet_db_name) {\n        seaf_warning (\"ccnet_db_name not set in config.\\n\");\n        ret = -1;\n        goto out;\n    }\n    if (!option->seafile_db_name) {\n        seaf_warning (\"db_name not set in config.\\n\");\n        ret = -1;\n        goto out;\n    }\n\nout:\n    if (ret < 0) {\n        db_option_free (option);\n        return NULL;\n    }\n\n    return option;\n}\n\nstatic int\nmysql_db_start (SeafileSession *session)\n{\n    DBOption *option = NULL;\n\n    option = load_db_option (session);\n    if (!option) {\n        seaf_warning (\"Failed to load database config.\\n\");\n        return -1;\n    }\n\n    session->db = seaf_db_new_mysql (option->host, option->port, option->user, option->passwd, option->seafile_db_name,\n                                     NULL, option->use_ssl, option->skip_verify, option->ca_path, option->charset, option->max_connections);\n    if (!session->db) {\n        db_option_free (option);\n        seaf_warning (\"Failed to start mysql db.\\n\");\n        return -1;\n    }\n\n    db_option_free (option);\n    return 0;\n}\n\n#endif\n\n#ifdef HAVE_POSTGRESQL\n\nstatic int\npgsql_db_start (SeafileSession *session)\n{\n    char *host, *user, *passwd, *db, *unix_socket;\n    unsigned int port;\n    GError *error = NULL;\n\n    host = seaf_key_file_get_string (session->config, \"database\", \"host\", &error);\n    if (!host) {\n        seaf_warning (\"DB host not set in config.\\n\");\n        return -1;\n    }\n\n    user = 
seaf_key_file_get_string (session->config, \"database\", \"user\", &error);\n    if (!user) {\n        seaf_warning (\"DB user not set in config.\\n\");\n        return -1;\n    }\n\n    passwd = seaf_key_file_get_string (session->config, \"database\", \"password\", &error);\n    if (!passwd) {\n        seaf_warning (\"DB passwd not set in config.\\n\");\n        return -1;\n    }\n\n    db = seaf_key_file_get_string (session->config, \"database\", \"db_name\", &error);\n    if (!db) {\n        seaf_warning (\"DB name not set in config.\\n\");\n        return -1;\n    }\n    port = g_key_file_get_integer (session->config,\n                                   \"database\", \"port\", &error);\n    if (error) {\n        port = 0;\n        g_clear_error (&error);\n    }\n\n    unix_socket = seaf_key_file_get_string (session->config,\n                                         \"database\", \"unix_socket\", &error);\n\n    session->db = seaf_db_new_pgsql (host, port, user, passwd, db, unix_socket,\n                                     DEFAULT_MAX_CONNECTIONS);\n    if (!session->db) {\n        seaf_warning (\"Failed to start pgsql db.\\n\");\n        return -1;\n    }\n\n    g_free (host);\n    g_free (user);\n    g_free (passwd);\n    g_free (db);\n    g_free (unix_socket);\n\n    return 0;\n}\n\n#endif\n\nint\nload_database_config (SeafileSession *session)\n{\n    char *type;\n    GError *error = NULL;\n    int ret = 0;\n    gboolean create_tables = FALSE;\n\n    type = seaf_key_file_get_string (session->config, \"database\", \"type\", &error);\n    /* Default to use mysql if not set. 
*/\n    if (type && strcasecmp (type, \"sqlite\") == 0) {\n        ret = sqlite_db_start (session);\n    }\n#ifdef HAVE_MYSQL\n    else {\n        ret = mysql_db_start (session);\n    }\n#endif\n    if (ret == 0) {\n        if (g_key_file_has_key (session->config, \"database\", \"create_tables\", NULL))\n            create_tables = g_key_file_get_boolean (session->config,\n                                                    \"database\", \"create_tables\", NULL);\n        session->create_tables = create_tables;\n    }\n\n    g_free (type);\n\n    return ret;\n}\n\nstatic int\nccnet_init_sqlite_database (SeafileSession *session)\n{\n    char *db_path;\n\n    db_path = g_build_path (\"/\", session->ccnet_dir, CCNET_DB, NULL);\n    session->ccnet_db = seaf_db_new_sqlite (db_path, DEFAULT_MAX_CONNECTIONS);\n    if (!session->ccnet_db) {\n        seaf_warning (\"Failed to open ccnet database.\\n\");\n        return -1;\n    }\n    return 0;\n}\n\n#ifdef HAVE_MYSQL\n\nstatic int\nccnet_init_mysql_database (SeafileSession *session)\n{\n    DBOption *option = NULL;\n\n    option = load_db_option (session);\n    if (!option) {\n        seaf_warning (\"Failed to load database config.\\n\");\n        return -1;\n    }\n\n    session->ccnet_db = seaf_db_new_mysql (option->host, option->port, option->user, option->passwd, option->ccnet_db_name,\n                                           NULL, option->use_ssl, option->skip_verify, option->ca_path, option->charset, option->max_connections);\n    if (!session->ccnet_db) {\n        db_option_free (option);\n        seaf_warning (\"Failed to open ccnet database.\\n\");\n        return -1;\n    }\n\n    db_option_free (option);\n    return 0;\n}\n\n#endif\n\nint\nload_ccnet_database_config (SeafileSession *session)\n{\n    int ret;\n    char *engine;\n    gboolean create_tables = FALSE;\n\n    engine = ccnet_key_file_get_string (session->config, \"database\", \"type\");\n    if (engine && strcasecmp (engine, \"sqlite\") == 0) {\n    
    seaf_message (\"Use database sqlite\\n\");\n        ret = ccnet_init_sqlite_database (session);\n    }\n#ifdef HAVE_MYSQL\n    else {\n        seaf_message(\"Use database Mysql\\n\");\n        ret = ccnet_init_mysql_database (session);\n    }\n#endif\n    if (ret == 0) {\n        if (g_key_file_has_key (session->config, \"database\", \"create_tables\", NULL))\n            create_tables = g_key_file_get_boolean (session->config, \"database\", \"create_tables\", NULL);\n        session->ccnet_create_tables = create_tables;\n    }\n\n    g_free (engine);\n    return ret;\n}\n\n#ifdef FULL_FEATURE\n\nchar *\nseaf_gen_notif_server_jwt (const char *repo_id, const char *username)\n{\n    char *jwt_token = NULL;\n    gint64 now = (gint64)time(NULL);\n\n    jwt_t *jwt = NULL;\n\n    if (!seaf->notif_server_private_key) {\n        seaf_warning (\"No private key is configured for generating jwt token\\n\");\n        return NULL;\n    }\n\n    int ret = jwt_new (&jwt);\n    if (ret != 0 || jwt == NULL) {\n        seaf_warning (\"Failed to create jwt\\n\");\n        goto out;\n    }\n\n    ret = jwt_add_grant (jwt, \"repo_id\", repo_id);\n    if (ret != 0) {\n        seaf_warning (\"Failed to add repo_id to jwt\\n\");\n        goto out;\n    }\n    ret = jwt_add_grant (jwt, \"username\", username);\n    if (ret != 0) {\n        seaf_warning (\"Failed to add username to jwt\\n\");\n        goto out;\n    }\n    ret = jwt_add_grant_int (jwt, \"exp\", now + JWT_TOKEN_EXPIRE_TIME);\n    if (ret != 0) {\n        seaf_warning (\"Failed to expire time to jwt\\n\");\n        goto out;\n    }\n    ret = jwt_set_alg (jwt, JWT_ALG_HS256, (unsigned char *)seaf->notif_server_private_key, strlen(seaf->notif_server_private_key));\n    if (ret != 0) {\n        seaf_warning (\"Failed to set alg\\n\");\n        goto out;\n    }\n\n    jwt_token = jwt_encode_str (jwt);\n\nout:\n    jwt_free (jwt);\n    return jwt_token;\n}\n#endif\n\nchar *\nseaf_parse_auth_token (const char *auth_token)\n{\n 
   char *token = NULL;\n    char **parts = NULL;\n\n    if (!auth_token) {\n        return NULL;\n    }\n\n    parts = g_strsplit (auth_token, \" \", 2);\n    if (!parts) {\n        return NULL;\n    }\n\n    if (g_strv_length (parts) < 2) {\n        g_strfreev (parts);\n        return NULL;\n    }\n\n    token = g_strdup(parts[1]);\n\n    g_strfreev (parts);\n    return token;\n}\n\nvoid\nsplit_filename (const char *filename, char **name, char **ext)\n{\n    char *dot;\n\n    dot = strrchr (filename, '.');\n    if (dot) {\n        *ext = g_strdup (dot + 1);\n        *name = g_strndup (filename, dot - filename);\n    } else {\n        *name = g_strdup (filename);\n        *ext = NULL;\n    }\n}\n\nstatic gboolean\ncollect_token_list (SeafDBRow *row, void *data)\n{\n    GList **p_tokens = data;\n    const char *token;\n\n    token = seaf_db_row_get_column_text (row, 0);\n    *p_tokens = g_list_prepend (*p_tokens, g_strdup(token));\n\n    return TRUE;\n}\n\nint\nseaf_delete_repo_tokens (SeafRepo *repo)\n{\n    int ret = 0;\n    const char *template;\n    GList *token_list = NULL;\n    GList *ptr;\n    GString *token_list_str = g_string_new (\"\");\n    GString *sql = g_string_new (\"\");\n    int rc;\n\n    template = \"SELECT u.token FROM RepoUserToken as u WHERE u.repo_id=?\";\n    rc = seaf_db_statement_foreach_row (seaf->db, template,\n                                        collect_token_list, &token_list,\n                                        1, \"string\", repo->id);\n    if (rc < 0) {\n        goto out;\n    }\n\n    if (rc == 0)\n        goto out;\n\n    for (ptr = token_list; ptr; ptr = ptr->next) {\n        const char *token = (char *)ptr->data;\n        if (token_list_str->len == 0)\n            g_string_append_printf (token_list_str, \"'%s'\", token);\n        else\n            g_string_append_printf (token_list_str, \",'%s'\", token);\n    }\n\n    /* Note that there is a size limit on sql query. 
In MySQL it's 1MB by default.\n     * Normally the token_list won't be that long.\n     */\n    g_string_printf (sql, \"DELETE FROM RepoUserToken WHERE token in (%s)\",\n                     token_list_str->str);\n    rc = seaf_db_statement_query (seaf->db, sql->str, 0);\n    if (rc < 0) {\n        goto out;\n    }\n\n    g_string_printf (sql, \"DELETE FROM RepoTokenPeerInfo WHERE token in (%s)\",\n                     token_list_str->str);\n    rc = seaf_db_statement_query (seaf->db, sql->str, 0);\n    if (rc < 0) {\n        goto out;\n    }\n\nout:\n    g_string_free (token_list_str, TRUE);\n    g_string_free (sql, TRUE);\n    g_list_free_full (token_list, (GDestroyNotify)g_free);\n\n    if (rc < 0) {\n        ret = -1;\n    }\n\n    return ret;\n}\n"
  },
  {
    "path": "common/seaf-utils.h",
    "content": "#ifndef SEAF_UTILS_H\n#define SEAF_UTILS_H\n\n#include <searpc-client.h>\n\nstruct _SeafileSession;\n\nchar *\nseafile_session_get_tmp_file_path (struct _SeafileSession *session,\n                                   const char *basename,\n                                   char path[]);\n\nint\nload_database_config (struct _SeafileSession *session);\n\nint\nload_ccnet_database_config (struct _SeafileSession *session);\n\n#ifdef FULL_FEATURE\n#endif\n\nchar *\nseaf_gen_notif_server_jwt (const char *repo_id, const char *username);\n\nchar *\nseaf_parse_auth_token (const char *auth_token);\n\nvoid\nsplit_filename (const char *filename, char **name, char **ext);\n\nint\nseaf_delete_repo_tokens (SeafRepo *repo);\n\n#endif\n"
  },
  {
    "path": "common/seafile-crypt.c",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#include <string.h>\n#include <glib.h>\n#include \"seafile-crypt.h\"\n#include \"password-hash.h\"\n#include <openssl/rand.h>\n\n#include \"utils.h\"\n#include \"log.h\"\n\n/*\n  The EVP_EncryptXXX and EVP_DecryptXXX series of functions have a\n  weird choice of returned value.\n*/\n#define ENC_SUCCESS 1\n#define ENC_FAILURE 0\n#define DEC_SUCCESS 1\n#define DEC_FAILURE 0\n\n#define KEYGEN_ITERATION 1 << 19\n#define KEYGEN_ITERATION2 1000\n/* Should generate random salt for each repo. */\nstatic unsigned char salt[8] = { 0xda, 0x90, 0x45, 0xc3, 0x06, 0xc7, 0xcc, 0x26 };\n\nSeafileCrypt *\nseafile_crypt_new (int version, unsigned char *key, unsigned char *iv)\n{\n    SeafileCrypt *crypt = g_new0 (SeafileCrypt, 1);\n    crypt->version = version;\n    if (version == 1)\n        memcpy (crypt->key, key, 16);\n    else\n        memcpy (crypt->key, key, 32);\n    memcpy (crypt->iv, iv, 16);\n    return crypt;\n}\n\nint\nseafile_derive_key (const char *data_in, int in_len, int version,\n                    const char *repo_salt,\n                    unsigned char *key, unsigned char *iv)\n{\n    if (version >= 3) {\n        unsigned char repo_salt_bin[32];\n        hex_to_rawdata (repo_salt, repo_salt_bin, 32);\n\n        PKCS5_PBKDF2_HMAC (data_in, in_len,\n                           repo_salt_bin, sizeof(repo_salt_bin),\n                           KEYGEN_ITERATION2,\n                           EVP_sha256(),\n                           32, key);\n        PKCS5_PBKDF2_HMAC ((char *)key, 32,\n                           repo_salt_bin, sizeof(repo_salt_bin),\n                           10,\n                           EVP_sha256(),\n                           16, iv);\n        return 0;\n    } else if (version == 2) {\n        PKCS5_PBKDF2_HMAC (data_in, in_len,\n                           salt, sizeof(salt),\n                           KEYGEN_ITERATION2,\n                         
  EVP_sha256(),\n                           32, key);\n        PKCS5_PBKDF2_HMAC ((char *)key, 32,\n                           salt, sizeof(salt),\n                           10,\n                           EVP_sha256(),\n                           16, iv);\n        return 0;\n    } else if (version == 1)\n        return EVP_BytesToKey (EVP_aes_128_cbc(), /* cipher mode */\n                               EVP_sha1(),        /* message digest */\n                               salt,              /* salt */\n                               (unsigned char*)data_in,\n                               in_len,\n                               KEYGEN_ITERATION,   /* iteration times */\n                               key, /* the derived key */\n                               iv); /* IV, initial vector */\n    else\n        return EVP_BytesToKey (EVP_aes_128_ecb(), /* cipher mode */\n                               EVP_sha1(),        /* message digest */\n                               NULL,              /* salt */\n                               (unsigned char*)data_in,\n                               in_len,\n                               3,   /* iteration times */\n                               key, /* the derived key */\n                               iv); /* IV, initial vector */\n}\n\nint\nseafile_generate_repo_salt (char *repo_salt)\n{\n    unsigned char repo_salt_bin[32];\n\n    int rc = RAND_bytes (repo_salt_bin, sizeof(repo_salt_bin));\n    if (rc != 1) {\n        seaf_warning (\"Failed to generate salt for repo encryption.\\n\");\n        return -1;\n    }\n\n    rawdata_to_hex (repo_salt_bin, repo_salt, 32);\n\n    return 0;\n}\n\nint\nseafile_generate_random_key (const char *passwd,\n                             int version,\n                             const char *repo_salt,\n                             char *random_key)\n{\n    SeafileCrypt *crypt;\n    unsigned char secret_key[32], *rand_key;\n    int outlen;\n    unsigned char key[32], iv[16];\n\n    int rc = 
RAND_bytes (secret_key, sizeof(secret_key));\n    if (rc != 1) {\n        seaf_warning (\"Failed to generate secret key for repo encryption.\\n\");\n        return -1;\n    }\n\n    seafile_derive_key (passwd, strlen(passwd), version, repo_salt, key, iv);\n\n    crypt = seafile_crypt_new (version, key, iv);\n\n    seafile_encrypt ((char **)&rand_key, &outlen,\n                     (char *)secret_key, sizeof(secret_key), crypt);\n\n    rawdata_to_hex (rand_key, random_key, 48);\n\n    g_free (crypt);\n    g_free (rand_key);\n\n    return 0;\n}\n\nvoid\nseafile_generate_magic (int version, const char *repo_id,\n                        const char *passwd,\n                        const char *repo_salt,\n                        char *magic)\n{\n    GString *buf = g_string_new (NULL);\n    unsigned char key[32], iv[16];\n\n    /* Compute a \"magic\" string from repo_id and passwd.\n     * This is used to verify the password given by user before decrypting\n     * data.\n     */\n    g_string_append_printf (buf, \"%s%s\", repo_id, passwd);\n\n    seafile_derive_key (buf->str, buf->len, version, repo_salt, key, iv);\n\n    g_string_free (buf, TRUE);\n    rawdata_to_hex (key, magic, 32);\n}\n\nvoid\nseafile_generate_pwd_hash (int version,\n                           const char *repo_id,\n                           const char *passwd,\n                           const char *repo_salt,\n                           const char *algo,\n                           const char *params_str,\n                           char *pwd_hash)\n{\n    GString *buf = g_string_new (NULL);\n    unsigned char key[32];\n\n    /* Compute a \"pwd_hash\" string from repo_id and passwd.\n     * This is used to verify the password given by user before decrypting\n     * data.\n     */\n    g_string_append_printf (buf, \"%s%s\", repo_id, passwd);\n\n    if (version <= 2) {\n        // use fixed repo salt\n        char fixed_salt[64] = {0};\n        rawdata_to_hex(salt, fixed_salt, 8);\n        
pwd_hash_derive_key (buf->str, buf->len, fixed_salt, algo, params_str, key);\n    } else {\n        pwd_hash_derive_key (buf->str, buf->len, repo_salt, algo, params_str, key);\n    }\n\n    g_string_free (buf, TRUE);\n    rawdata_to_hex (key, pwd_hash, 32);\n}\n\nint\nseafile_verify_repo_passwd (const char *repo_id,\n                            const char *passwd,\n                            const char *magic,\n                            int version,\n                            const char *repo_salt)\n{\n    GString *buf = g_string_new (NULL);\n    unsigned char key[32], iv[16];\n    char hex[65];\n\n    if (version != 1 && version != 2 && version != 3 && version != 4) {\n        seaf_warning (\"Unsupported enc_version %d.\\n\", version);\n        return -1;\n    }\n\n    /* Recompute the magic and compare it with the one comes with the repo. */\n    g_string_append_printf (buf, \"%s%s\", repo_id, passwd);\n\n    seafile_derive_key (buf->str, buf->len, version, repo_salt, key, iv);\n\n    g_string_free (buf, TRUE);\n    if (version >= 2)\n        rawdata_to_hex (key, hex, 32);\n    else\n        rawdata_to_hex (key, hex, 16);\n\n    if (g_strcmp0 (hex, magic) == 0)\n        return 0;\n    else\n        return -1;\n}\n\nint\nseafile_pwd_hash_verify_repo_passwd (int version,\n                                     const char *repo_id,\n                                     const char *passwd,\n                                     const char *repo_salt,\n                                     const char *pwd_hash,\n                                     const char *algo,\n                                     const char *params_str)\n{\n    GString *buf = g_string_new (NULL);\n    unsigned char key[32];\n    char hex[65];\n\n    g_string_append_printf (buf, \"%s%s\", repo_id, passwd);\n\n    if (version <= 2) {\n        // use fixed repo salt\n        char fixed_salt[64] = {0};\n        rawdata_to_hex(salt, fixed_salt, 8);\n        pwd_hash_derive_key (buf->str, buf->len, 
fixed_salt, algo, params_str, key);\n    } else {\n        pwd_hash_derive_key (buf->str, buf->len, repo_salt, algo, params_str, key);\n    }\n\n    g_string_free (buf, TRUE);\n    rawdata_to_hex (key, hex, 32);\n\n    if (g_strcmp0 (hex, pwd_hash) == 0)\n        return 0;\n    else\n        return -1;\n}\n\nint\nseafile_decrypt_repo_enc_key (int enc_version,\n                              const char *passwd, const char *random_key,\n                              const char *repo_salt,\n                              unsigned char *key_out, unsigned char *iv_out)\n{\n    unsigned char key[32], iv[16];\n\n    seafile_derive_key (passwd, strlen(passwd), enc_version, repo_salt, key, iv);\n\n    if (enc_version == 1) {\n        memcpy (key_out, key, 16);\n        memcpy (iv_out, iv, 16);\n        return 0;\n    } else if (enc_version >= 2) {\n        unsigned char enc_random_key[48], *dec_random_key;\n        int outlen;\n        SeafileCrypt *crypt;\n\n        if (random_key == NULL || random_key[0] == 0) {\n            seaf_warning (\"Empty random key.\\n\");\n            return -1;\n        }\n\n        hex_to_rawdata (random_key, enc_random_key, 48);\n\n        crypt = seafile_crypt_new (enc_version, key, iv);\n        if (seafile_decrypt ((char **)&dec_random_key, &outlen,\n                             (char *)enc_random_key, 48,\n                             crypt) < 0) {\n            seaf_warning (\"Failed to decrypt random key.\\n\");\n            g_free (crypt);\n            return -1;\n        }\n        g_free (crypt);\n\n        seafile_derive_key ((char *)dec_random_key, 32, enc_version,\n                            repo_salt,\n                            key, iv);\n        memcpy (key_out, key, 32);\n        memcpy (iv_out, iv, 16);\n\n        g_free (dec_random_key);\n        return 0;\n    }\n\n    return -1;\n}\n\nint\nseafile_update_random_key (const char *old_passwd, const char *old_random_key,\n                           const char *new_passwd, char 
*new_random_key,\n                           int enc_version, const char *repo_salt)\n{\n    unsigned char key[32], iv[16];\n    unsigned char random_key_raw[48], *secret_key, *new_random_key_raw;\n    int secret_key_len, random_key_len;\n    SeafileCrypt *crypt;\n\n    /* First, use old_passwd to decrypt secret key from old_random_key. */\n    seafile_derive_key (old_passwd, strlen(old_passwd), enc_version,\n                        repo_salt, key, iv);\n\n    hex_to_rawdata (old_random_key, random_key_raw, 48);\n\n    crypt = seafile_crypt_new (enc_version, key, iv);\n    if (seafile_decrypt ((char **)&secret_key, &secret_key_len,\n                         (char *)random_key_raw, 48,\n                         crypt) < 0) {\n        seaf_warning (\"Failed to decrypt random key.\\n\");\n        g_free (crypt);\n        return -1;\n    }\n    g_free (crypt);\n\n    /* Second, use new_passwd to encrypt secret key. */\n    seafile_derive_key (new_passwd, strlen(new_passwd), enc_version,\n                        repo_salt, key, iv);\n\n    crypt = seafile_crypt_new (enc_version, key, iv);\n\n    seafile_encrypt ((char **)&new_random_key_raw, &random_key_len,\n                     (char *)secret_key, secret_key_len, crypt);\n\n    rawdata_to_hex (new_random_key_raw, new_random_key, 48);\n\n    g_free (secret_key);\n    g_free (new_random_key_raw);\n    g_free (crypt);\n\n    return 0;\n}\n\nint\nseafile_encrypt (char **data_out,\n                 int *out_len,\n                 const char *data_in,\n                 const int in_len,\n                 SeafileCrypt *crypt)\n{\n    *data_out = NULL;\n    *out_len = -1;\n\n    /* check validation */\n    if ( data_in == NULL || in_len <= 0 || crypt == NULL) {\n        seaf_warning (\"Invalid params.\\n\");\n        return -1;\n    }\n\n    EVP_CIPHER_CTX *ctx;\n    int ret;\n    int blks;\n\n    /* Prepare CTX for encryption. 
*/\n    ctx = EVP_CIPHER_CTX_new ();\n\n    if (crypt->version == 1)\n        ret = EVP_EncryptInit_ex (ctx,\n                                  EVP_aes_128_cbc(), /* cipher mode */\n                                  NULL, /* engine, NULL for default */\n                                  crypt->key,  /* derived key */\n                                  crypt->iv);  /* initial vector */\n    else if (crypt->version == 3)\n        ret = EVP_EncryptInit_ex (ctx,\n                                  EVP_aes_128_ecb(), /* cipher mode */\n                                  NULL, /* engine, NULL for default */\n                                  crypt->key,  /* derived key */\n                                  crypt->iv);  /* initial vector */\n    else\n        ret = EVP_EncryptInit_ex (ctx,\n                                  EVP_aes_256_cbc(), /* cipher mode */\n                                  NULL, /* engine, NULL for default */\n                                  crypt->key,  /* derived key */\n                                  crypt->iv);  /* initial vector */\n\n    if (ret == ENC_FAILURE) {\n        EVP_CIPHER_CTX_free (ctx);\n        return -1;\n    }\n    /* Allocating output buffer. */\n    \n    /*\n      For EVP symmetric encryption, padding is always used __even if__\n      data size is a multiple of block size, in which case the padding\n      length is the block size. so we have the following:\n    */\n    \n    blks = (in_len / BLK_SIZE) + 1;\n\n    *data_out = (char *)g_malloc (blks * BLK_SIZE);\n\n    if (*data_out == NULL) {\n        seaf_warning (\"failed to allocate the output buffer.\\n\");\n        goto enc_error;\n    }                \n\n    int update_len, final_len;\n\n    /* Do the encryption. 
*/\n    ret = EVP_EncryptUpdate (ctx,\n                             (unsigned char*)*data_out,\n                             &update_len,\n                             (unsigned char*)data_in,\n                             in_len);\n\n    if (ret == ENC_FAILURE)\n        goto enc_error;\n\n\n    /* Finish the possible partial block. */\n    ret = EVP_EncryptFinal_ex (ctx,\n                               (unsigned char*)*data_out + update_len,\n                               &final_len);\n\n    *out_len = update_len + final_len;\n\n    /* out_len should be equal to the allocated buffer size. */\n    if (ret == ENC_FAILURE || *out_len != (blks * BLK_SIZE))\n        goto enc_error;\n    \n    EVP_CIPHER_CTX_free (ctx);\n\n    return 0;\n\nenc_error:\n\n    EVP_CIPHER_CTX_free (ctx);\n\n    *out_len = -1;\n\n    if (*data_out != NULL)\n        g_free (*data_out);\n\n    *data_out = NULL;\n\n    return -1;\n    \n}\n                               \n\n    \n\nint\nseafile_decrypt (char **data_out,\n                 int *out_len,\n                 const char *data_in,\n                 const int in_len,\n                 SeafileCrypt *crypt)\n{\n    *data_out = NULL;\n    *out_len = -1;\n\n    /* Check validation. Because padding is always used, in_len must\n     * be a multiple of BLK_SIZE */\n    if ( data_in == NULL || in_len <= 0 || in_len % BLK_SIZE != 0 ||\n         crypt == NULL) {\n\n        seaf_warning (\"Invalid param(s).\\n\");\n        return -1;\n    }\n\n    EVP_CIPHER_CTX *ctx;\n    int ret;\n\n    /* Prepare CTX for decryption. 
*/\n    ctx = EVP_CIPHER_CTX_new ();\n\n    if (crypt->version == 1)\n        ret = EVP_DecryptInit_ex (ctx,\n                                  EVP_aes_128_cbc(), /* cipher mode */\n                                  NULL, /* engine, NULL for default */\n                                  crypt->key,  /* derived key */\n                                  crypt->iv);  /* initial vector */\n    else if (crypt->version == 3)\n        ret = EVP_DecryptInit_ex (ctx,\n                                  EVP_aes_128_ecb(), /* cipher mode */\n                                  NULL, /* engine, NULL for default */\n                                  crypt->key,  /* derived key */\n                                  crypt->iv);  /* initial vector */\n    else\n        ret = EVP_DecryptInit_ex (ctx,\n                                  EVP_aes_256_cbc(), /* cipher mode */\n                                  NULL, /* engine, NULL for default */\n                                  crypt->key,  /* derived key */\n                                  crypt->iv);  /* initial vector */\n\n    if (ret == DEC_FAILURE) {\n        EVP_CIPHER_CTX_free (ctx);\n        return -1;\n    }\n    /* Allocating output buffer. */\n    \n    *data_out = (char *)g_malloc (in_len);\n\n    if (*data_out == NULL) {\n        seaf_warning (\"failed to allocate the output buffer.\\n\");\n        goto dec_error;\n    }                \n\n    int update_len, final_len;\n\n    /* Do the decryption. */\n    ret = EVP_DecryptUpdate (ctx,\n                             (unsigned char*)*data_out,\n                             &update_len,\n                             (unsigned char*)data_in,\n                             in_len);\n\n    if (ret == DEC_FAILURE)\n        goto dec_error;\n\n\n    /* Finish the possible partial block. 
*/\n    ret = EVP_DecryptFinal_ex (ctx,\n                               (unsigned char*)*data_out + update_len,\n                               &final_len);\n\n    *out_len = update_len + final_len;\n\n    /* out_len should be smaller than in_len. */\n    if (ret == DEC_FAILURE || *out_len > in_len)\n        goto dec_error;\n\n    EVP_CIPHER_CTX_free (ctx);\n    \n    return 0;\n\ndec_error:\n\n    EVP_CIPHER_CTX_free (ctx);\n\n    *out_len = -1;\n    if (*data_out != NULL)\n        g_free (*data_out);\n\n    *data_out = NULL;\n\n    return -1;\n    \n}\n\nint\nseafile_decrypt_init (EVP_CIPHER_CTX **ctx,\n                      int version,\n                      const unsigned char *key,\n                      const unsigned char *iv)\n{\n    int ret;\n\n    /* Prepare CTX for decryption. */\n    *ctx = EVP_CIPHER_CTX_new ();\n\n    if (version == 1)\n        ret = EVP_DecryptInit_ex (*ctx,\n                                  EVP_aes_128_cbc(), /* cipher mode */\n                                  NULL, /* engine, NULL for default */\n                                  key,  /* derived key */\n                                  iv);  /* initial vector */\n    else if (version == 3)\n        ret = EVP_DecryptInit_ex (*ctx,\n                                  EVP_aes_128_ecb(), /* cipher mode */\n                                  NULL, /* engine, NULL for default */\n                                  key,  /* derived key */\n                                  iv);  /* initial vector */\n    else\n        ret = EVP_DecryptInit_ex (*ctx,\n                                  EVP_aes_256_cbc(), /* cipher mode */\n                                  NULL, /* engine, NULL for default */\n                                  key,  /* derived key */\n                                  iv);  /* initial vector */\n\n    if (ret == DEC_FAILURE)\n        return -1;\n\n    return 0;\n}\n"
  },
  {
    "path": "common/seafile-crypt.h",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n/*\n  Description:\n  \n  The function pair \"seafile_encrypt/seafile_decrypt\" are used to\n  encrypt/decrypt data in the seafile system, using AES 128 bit ecb\n  algorithm provided by openssl.\n*/  \n\n#ifndef _SEAFILE_CRYPT_H\n#define _SEAFILE_CRYPT_H\n\n#include <openssl/aes.h>\n#include <openssl/evp.h>\n\n\n/* Block size, in bytes. For AES it can only be 16 bytes. */\n#define BLK_SIZE 16\n#define ENCRYPT_BLK_SIZE BLK_SIZE\n\nstruct SeafileCrypt {\n    int version;\n    unsigned char key[32];   /* set when enc_version >= 1 */\n    unsigned char iv[16];\n};\n\ntypedef struct SeafileCrypt SeafileCrypt;\n\nSeafileCrypt *\nseafile_crypt_new (int version, unsigned char *key, unsigned char *iv);\n\n/*\n  Derive key and iv used by AES encryption from @data_in.\n  key and iv is 16 bytes for version 1, and 32 bytes for version 2.\n\n  @data_out: pointer to the output of the encrpyted/decrypted data,\n  whose content must be freed by g_free when not used.\n\n  @out_len: pointer to length of output, in bytes\n\n  @data_in: address of input buffer\n\n  @in_len: length of data to be encrpyted/decrypted, in bytes \n\n  @crypt: container of crypto info.\n  \n  RETURN VALUES:\n\n  On success, 0 is returned, and the encrpyted/decrypted data is in\n  *data_out, with out_len set to its length. On failure, -1 is returned\n  and *data_out is set to NULL, with out_len set to -1;\n*/\n\nint\nseafile_derive_key (const char *data_in, int in_len, int version,\n                    const char *repo_salt,\n                    unsigned char *key, unsigned char *iv);\n\n/* @salt must be an char array of size 65 bytes. 
*/\nint\nseafile_generate_repo_salt (char *repo_salt);\n\n/*\n * Generate the real key used to encrypt data.\n * The key 32 bytes long and encrpted with @passwd.\n */\nint\nseafile_generate_random_key (const char *passwd,\n                             int version,\n                             const char *repo_salt,\n                             char *random_key);\n\nvoid\nseafile_generate_magic (int version, const char *repo_id,\n                        const char *passwd,\n                        const char *repo_salt,\n                        char *magic);\n\nvoid\nseafile_generate_pwd_hash (int version,\n                           const char *repo_id,\n                           const char *passwd,\n                           const char *repo_salt,\n                           const char *algo,\n                           const char *params_str,\n                           char *pwd_hash);\n\nint\nseafile_verify_repo_passwd (const char *repo_id,\n                            const char *passwd,\n                            const char *magic,\n                            int version,\n                            const char *repo_salt);\n\nint\nseafile_pwd_hash_verify_repo_passwd (int version,\n                                     const char *repo_id,\n                                     const char *passwd,\n                                     const char *repo_salt,\n                                     const char *pwd_hash,\n                                     const char *algo,\n                                     const char *params_str);\n\nint\nseafile_decrypt_repo_enc_key (int enc_version,\n                              const char *passwd, const char *random_key,\n                              const char *repo_salt,\n                              unsigned char *key_out, unsigned char *iv_out);\n\nint\nseafile_update_random_key (const char *old_passwd, const char *old_random_key,\n                           const char *new_passwd, char *new_random_key,\n     
                      int enc_version, const char *repo_salt);\n\nint\nseafile_encrypt (char **data_out,\n                 int *out_len,\n                 const char *data_in,\n                 const int in_len,\n                 SeafileCrypt *crypt);\n\n\nint\nseafile_decrypt (char **data_out,\n                 int *out_len,\n                 const char *data_in,\n                 const int in_len,\n                 SeafileCrypt *crypt);\n\nint\nseafile_decrypt_init (EVP_CIPHER_CTX **ctx,\n                      int version,\n                      const unsigned char *key,\n                      const unsigned char *iv);\n\n#endif  /* _SEAFILE_CRYPT_H */\n"
  },
  {
    "path": "common/sync-repo-common.h",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#ifndef SYNC_REPO_COMMON\n#define SYNC_REPO_COMMON\n\n#define SC_COMMIT_ID    \"300\"\n#define SS_COMMIT_ID    \"Commit ID\"\n#define SC_NO_REPO      \"301\"\n#define SS_NO_REPO      \"No such repo\"\n#define SC_NO_BRANCH    \"302\"\n#define SS_NO_BRANCH    \"No such branch\"\n#define SC_NO_DSYNC     \"303\"\n#define SS_NO_DSYNC     \"Not double sync\"\n#define SC_REPO_CORRUPT     \"304\"\n#define SS_REPO_CORRUPT     \"Repo corrupted\"\n\n#define SC_SERVER_ERROR     \"401\"\n#define SS_SERVER_ERROR     \"Internal server error\"\n\n#endif\n"
  },
  {
    "path": "common/user-mgr.c",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#include \"common.h\"\n\n#include <sys/stat.h>\n#include <dirent.h>\n\n#include \"utils.h\"\n\n#include \"seafile-session.h\"\n#include \"seafile-error.h\"\n#include \"user-mgr.h\"\n#include \"seaf-db.h\"\n#include \"seaf-utils.h\"\n\n#include <openssl/sha.h>\n#include <openssl/rand.h>\n#include <openssl/evp.h>\n\n#define DEBUG_FLAG  CCNET_DEBUG_PEER\n#include \"log.h\"\n\n#define DEFAULT_SAVING_INTERVAL_MSEC 30000\n\n#define DEFAULT_MAX_CONNECTIONS 100\n\nG_DEFINE_TYPE (CcnetUserManager, ccnet_user_manager, G_TYPE_OBJECT);\n\n\n#define GET_PRIV(o)  \\\n   (G_TYPE_INSTANCE_GET_PRIVATE ((o), CCNET_TYPE_USER_MANAGER, CcnetUserManagerPriv))\n\n\nstatic int open_db (CcnetUserManager *manager);\n\nstruct CcnetUserManagerPriv {\n    CcnetDB    *db;\n    int         max_users;\n};\n\nstatic void\nccnet_user_manager_class_init (CcnetUserManagerClass *klass)\n{\n\n    g_type_class_add_private (klass, sizeof (CcnetUserManagerPriv));\n}\n\nstatic void\nccnet_user_manager_init (CcnetUserManager *manager)\n{\n    manager->priv = GET_PRIV(manager);\n}\n\nCcnetUserManager*\nccnet_user_manager_new (SeafileSession *session)\n{\n    CcnetUserManager* manager;\n\n    manager = g_object_new (CCNET_TYPE_USER_MANAGER, NULL);\n    manager->session = session;\n    manager->user_hash = g_hash_table_new (g_str_hash, g_str_equal);\n\n    return manager;\n}\n\n#define DEFAULT_PASSWD_HASH_ITER 10000\n\n// return current active user number\nstatic int\nget_current_user_number (CcnetUserManager *manager)\n{\n    int total = 0, count;\n\n    count = ccnet_user_manager_count_emailusers (manager, \"DB\");\n    if (count < 0) {\n        ccnet_warning (\"Failed to get user number from DB.\\n\");\n        return -1;\n    }\n    total += count;\n\n    return total;\n}\n\nstatic gboolean\ncheck_user_number (CcnetUserManager *manager, gboolean allow_equal)\n{\n    if (manager->priv->max_users == 0) {\n        
return TRUE;\n    }\n\n    int cur_num = get_current_user_number (manager);\n    if (cur_num < 0) {\n        return FALSE;\n    }\n\n    if ((allow_equal && cur_num > manager->priv->max_users) ||\n        (!allow_equal && cur_num >= manager->priv->max_users)) {\n        ccnet_warning (\"The number of users exceeds limit, max %d, current %d\\n\",\n                       manager->priv->max_users, cur_num);\n        return FALSE;\n    }\n\n    return TRUE;\n}\n\nint\nccnet_user_manager_prepare (CcnetUserManager *manager)\n{\n    int ret;\n\n    manager->passwd_hash_iter = DEFAULT_PASSWD_HASH_ITER;\n\n    manager->userdb_path = g_build_filename (manager->session->ccnet_dir,\n                                             \"user-db\", NULL);\n    ret = open_db(manager);\n    if (ret < 0)\n        return ret;\n\n    if (!check_user_number (manager, TRUE)) {\n        return -1;\n    }\n\n    return 0;\n}\n\nvoid\nccnet_user_manager_free (CcnetUserManager *manager)\n{\n    g_object_unref (manager);\n}\n\nvoid\nccnet_user_manager_start (CcnetUserManager *manager)\n{\n\n}\n\nvoid ccnet_user_manager_on_exit (CcnetUserManager *manager)\n{\n}\n\nvoid\nccnet_user_manager_set_max_users (CcnetUserManager *manager, gint64 max_users)\n{\n    manager->priv->max_users = max_users;\n}\n\n/* -------- DB Operations -------- */\n\nstatic int check_db_table (SeafDB *db)\n{\n    char *sql;\n\n    int db_type = seaf_db_type (db);\n    if (db_type == SEAF_DB_TYPE_MYSQL) {\n        sql = \"CREATE TABLE IF NOT EXISTS EmailUser (\"\n            \"id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, \"\n            \"email VARCHAR(255), passwd VARCHAR(256), \"\n            \"is_staff BOOL NOT NULL, is_active BOOL NOT NULL, \"\n            \"ctime BIGINT, reference_id VARCHAR(255),\"\n            \"UNIQUE INDEX (email), UNIQUE INDEX (reference_id))\"\n            \"ENGINE=INNODB\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n        sql = \"CREATE TABLE IF NOT EXISTS Binding (id 
BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, \"\n            \"email VARCHAR(255), peer_id CHAR(41),\"\n            \"UNIQUE INDEX (peer_id), INDEX (email(20)))\"\n            \"ENGINE=INNODB\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n        sql = \"CREATE TABLE IF NOT EXISTS UserRole (\"\n          \"id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, \"\n          \"email VARCHAR(255), role VARCHAR(255), UNIQUE INDEX (email)) \"\n          \"ENGINE=INNODB\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n        sql = \"CREATE TABLE IF NOT EXISTS LDAPConfig ( \"\n          \"id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, cfg_group VARCHAR(255) NOT NULL,\"\n          \"cfg_key VARCHAR(255) NOT NULL, value VARCHAR(255), property INTEGER) ENGINE=INNODB\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n    } else if (db_type == SEAF_DB_TYPE_SQLITE) {\n        sql = \"CREATE TABLE IF NOT EXISTS EmailUser (\"\n            \"id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,\"\n            \"email TEXT, passwd TEXT, is_staff bool NOT NULL, \"\n            \"is_active bool NOT NULL, ctime INTEGER, \"\n            \"reference_id TEXT)\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n        sql = \"CREATE UNIQUE INDEX IF NOT EXISTS email_index on EmailUser (email)\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n        sql = \"CREATE UNIQUE INDEX IF NOT EXISTS reference_id_index on EmailUser (reference_id)\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n        sql = \"CREATE TABLE IF NOT EXISTS Binding (email TEXT, peer_id TEXT)\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n        sql = \"CREATE INDEX IF NOT EXISTS email_index on Binding (email)\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n        sql = \"CREATE UNIQUE INDEX IF NOT EXISTS peer_index on Binding (peer_id)\";\n        if 
(seaf_db_query (db, sql) < 0)\n            return -1;\n\n        sql = \"CREATE TABLE IF NOT EXISTS UserRole (email TEXT, role TEXT)\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n        sql = \"CREATE INDEX IF NOT EXISTS userrole_email_index on UserRole (email)\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n        sql = \"CREATE UNIQUE INDEX IF NOT EXISTS userrole_userrole_index on UserRole (email, role)\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n        sql = \"CREATE TABLE IF NOT EXISTS LDAPConfig (cfg_group VARCHAR(255) NOT NULL,\"\n          \"cfg_key VARCHAR(255) NOT NULL, value VARCHAR(255), property INTEGER)\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n    } else if (db_type == SEAF_DB_TYPE_PGSQL) {\n        sql = \"CREATE TABLE IF NOT EXISTS EmailUser (\"\n            \"id SERIAL PRIMARY KEY, \"\n            \"email VARCHAR(255), passwd VARCHAR(256), \"\n            \"is_staff INTEGER NOT NULL, is_active INTEGER NOT NULL, \"\n            \"ctime BIGINT, reference_id VARCHAR(255), UNIQUE (email))\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n        //if (!pgsql_index_exists (db, \"emailuser_reference_id_idx\")) {\n        //    sql = \"CREATE UNIQUE INDEX emailuser_reference_id_idx ON EmailUser (reference_id)\";\n        //    if (seaf_db_query (db, sql) < 0)\n        //        return -1;\n        //}\n\n        sql = \"CREATE TABLE IF NOT EXISTS Binding (email VARCHAR(255), peer_id CHAR(41),\"\n            \"UNIQUE (peer_id))\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n        sql = \"CREATE TABLE IF NOT EXISTS UserRole (email VARCHAR(255), \"\n          \" role VARCHAR(255), UNIQUE (email, role))\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n        //if (!pgsql_index_exists (db, \"userrole_email_idx\")) {\n        //    sql = \"CREATE INDEX userrole_email_idx ON 
UserRole (email)\";\n        //    if (seaf_db_query (db, sql) < 0)\n        //        return -1;\n        //}\n\n        sql = \"CREATE TABLE IF NOT EXISTS LDAPConfig (cfg_group VARCHAR(255) NOT NULL,\"\n          \"cfg_key VARCHAR(255) NOT NULL, value VARCHAR(255), property INTEGER)\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n    }\n\n    return 0;\n}\n\n\nstatic CcnetDB *\nopen_sqlite_db (CcnetUserManager *manager)\n{\n    CcnetDB *db = NULL;\n    char *db_dir;\n    char *db_path;\n\n    db_dir = g_build_filename (manager->session->ccnet_dir, \"PeerMgr\", NULL);\n    if (checkdir_with_mkdir(db_dir) < 0) {\n        ccnet_error (\"Cannot open db dir %s: %s\\n\", db_dir,\n                     strerror(errno));\n        return NULL;\n    }\n    g_free (db_dir);\n\n    db_path = g_build_filename (manager->session->ccnet_dir, \"PeerMgr\",\n                                \"usermgr.db\", NULL);\n    db = seaf_db_new_sqlite (db_path, DEFAULT_MAX_CONNECTIONS);\n    g_free (db_path);\n\n    return db;\n}\n\nstatic int\nopen_db (CcnetUserManager *manager)\n{\n    CcnetDB *db = NULL;\n\n    switch (seaf_db_type(manager->session->ccnet_db)) {\n    /* To be compatible with the db file layout of 0.9.1 version,\n     * we don't use conf-dir/ccnet.db for user and peer info, but\n     * user conf-dir/PeerMgr/peermgr.db and conf-dir/PeerMgr/usermgr.db instead.\n     */\n    case SEAF_DB_TYPE_SQLITE:\n        db = open_sqlite_db (manager);\n        break;\n    case SEAF_DB_TYPE_PGSQL:\n    case SEAF_DB_TYPE_MYSQL:\n        db = manager->session->ccnet_db;\n        break;\n    }\n\n    if (!db)\n        return -1;\n\n    manager->priv->db = db;\n    if ((manager->session->ccnet_create_tables || seaf_db_type(db) == SEAF_DB_TYPE_PGSQL)\n        && check_db_table (db) < 0) {\n        ccnet_warning (\"Failed to create user db tables.\\n\");\n        return -1;\n    }\n    return 0;\n}\n\n\n/* -------- EmailUser Management -------- */\n\n/* This fixed salt is 
used in very early versions. It's kept for compatibility.\n * For the current password hashing algorithm, please see hash_password_pbkdf2_sha256()\n */\nstatic unsigned char salt[8] = { 0xdb, 0x91, 0x45, 0xc3, 0x06, 0xc7, 0xcc, 0x26 };\n\nstatic void\nhash_password (const char *passwd, char *hashed_passwd)\n{\n    unsigned char sha1[20];\n    SHA_CTX s;\n\n    SHA1_Init (&s);\n    SHA1_Update (&s, passwd, strlen(passwd));\n    SHA1_Final (sha1, &s);\n    rawdata_to_hex (sha1, hashed_passwd, 20);\n}\n\nstatic void\nhash_password_salted (const char *passwd, char *hashed_passwd)\n{\n    unsigned char sha[SHA256_DIGEST_LENGTH];\n    SHA256_CTX s;\n\n    SHA256_Init (&s);\n    SHA256_Update (&s, passwd, strlen(passwd));\n    SHA256_Update (&s, salt, sizeof(salt));\n    SHA256_Final (sha, &s);\n    rawdata_to_hex (sha, hashed_passwd, SHA256_DIGEST_LENGTH);\n}\n\nstatic void\nhash_password_pbkdf2_sha256 (const char *passwd,\n                             int iterations,\n                             char **db_passwd)\n{\n    guint8 sha[SHA256_DIGEST_LENGTH];\n    guint8 salt[SHA256_DIGEST_LENGTH];\n    char hashed_passwd[SHA256_DIGEST_LENGTH*2+1];\n    char salt_str[SHA256_DIGEST_LENGTH*2+1];\n\n    if (!RAND_bytes (salt, sizeof(salt))) {\n        ccnet_warning (\"Failed to generate salt \"\n                       \"with RAND_bytes(), use RAND_pseudo_bytes().\\n\");\n        RAND_pseudo_bytes (salt, sizeof(salt));\n    }\n\n    PKCS5_PBKDF2_HMAC (passwd, strlen(passwd),\n                       salt, sizeof(salt),\n                       iterations,\n                       EVP_sha256(),\n                       sizeof(sha), sha);\n\n    rawdata_to_hex (sha, hashed_passwd, SHA256_DIGEST_LENGTH);\n\n    rawdata_to_hex (salt, salt_str, SHA256_DIGEST_LENGTH);\n\n    /* Encode password hash related information into one string, similar to Django. 
*/\n    GString *buf = g_string_new (NULL);\n    g_string_printf (buf, \"PBKDF2SHA256$%d$%s$%s\",\n                     iterations, salt_str, hashed_passwd);\n    *db_passwd = g_string_free (buf, FALSE);\n}\n\nstatic gboolean\nvalidate_passwd_pbkdf2_sha256 (const char *passwd, const char *db_passwd)\n{\n    char **tokens;\n    char *salt_str, *hash;\n    int iter;\n    guint8 sha[SHA256_DIGEST_LENGTH];\n    guint8 salt[SHA256_DIGEST_LENGTH];\n    char hashed_passwd[SHA256_DIGEST_LENGTH*2+1];\n\n    if (g_strcmp0 (db_passwd, \"!\") == 0)\n        return FALSE;\n\n    tokens = g_strsplit (db_passwd, \"$\", -1);\n    if (!tokens || g_strv_length (tokens) != 4) {\n        if (tokens)\n            g_strfreev (tokens);\n        ccnet_warning (\"Invalide db passwd format %s.\\n\", db_passwd);\n        return FALSE;\n    }\n\n    iter = atoi (tokens[1]);\n    salt_str = tokens[2];\n    hash = tokens[3];\n\n    hex_to_rawdata (salt_str, salt, SHA256_DIGEST_LENGTH);\n\n    PKCS5_PBKDF2_HMAC (passwd, strlen(passwd),\n                       salt, sizeof(salt),\n                       iter,\n                       EVP_sha256(),\n                       sizeof(sha), sha);\n    rawdata_to_hex (sha, hashed_passwd, SHA256_DIGEST_LENGTH);\n\n    gboolean ret = (strcmp (hash, hashed_passwd) == 0);\n\n    g_strfreev (tokens);\n    return ret;\n}\n\nstatic gboolean\nvalidate_passwd (const char *passwd, const char *stored_passwd,\n                 gboolean *need_upgrade)\n{\n    char hashed_passwd[SHA256_DIGEST_LENGTH * 2 + 1];\n    int hash_len = strlen(stored_passwd);\n\n    *need_upgrade = FALSE;\n\n    if (hash_len == SHA256_DIGEST_LENGTH * 2) {\n        hash_password_salted (passwd, hashed_passwd);\n        *need_upgrade = TRUE;\n    } else if (hash_len == SHA_DIGEST_LENGTH * 2) {\n        hash_password (passwd, hashed_passwd);\n        *need_upgrade = TRUE;\n    } else {\n        return validate_passwd_pbkdf2_sha256 (passwd, stored_passwd);\n    }\n\n    if (strcmp (hashed_passwd, 
stored_passwd) == 0)\n        return TRUE;\n    else\n        return FALSE;\n}\n\nstatic int\nupdate_user_passwd (CcnetUserManager *manager,\n                    const char *email, const char *passwd)\n{\n    CcnetDB *db = manager->priv->db;\n    char *db_passwd = NULL;\n    int ret;\n\n    hash_password_pbkdf2_sha256 (passwd, manager->passwd_hash_iter,\n                                 &db_passwd);\n\n    /* convert email to lower case for case insensitive lookup. */\n    char *email_down = g_ascii_strdown (email, strlen(email));\n\n    ret = seaf_db_statement_query (db,\n                                    \"UPDATE EmailUser SET passwd=? WHERE email=?\",\n                                    2, \"string\", db_passwd, \"string\", email_down);\n\n    g_free (db_passwd);\n    g_free (email_down);\n\n    if (ret < 0)\n        return ret;\n\n    return 0;\n}\n\nint\nccnet_user_manager_add_emailuser (CcnetUserManager *manager,\n                                  const char *email,\n                                  const char *passwd,\n                                  int is_staff, int is_active)\n{\n    CcnetDB *db = manager->priv->db;\n    gint64 now = get_current_time();\n    char *db_passwd = NULL;\n    int ret;\n\n    if (!check_user_number (manager, FALSE)) {\n        return -1;\n    }\n\n    /* A user with unhashed \"!\" as password cannot be logged in.\n     * Such users are created for book keeping, such as users from\n     * Shibboleth.\n     */\n    if (g_strcmp0 (passwd, \"!\") != 0)\n        hash_password_pbkdf2_sha256 (passwd, manager->passwd_hash_iter,\n                                     &db_passwd);\n    else\n        db_passwd = g_strdup(passwd);\n\n    /* convert email to lower case for case insensitive lookup. 
*/\n    char *email_down = g_ascii_strdown (email, strlen(email));\n\n    ret = seaf_db_statement_query (db,\n                                    \"INSERT INTO EmailUser(email, passwd, is_staff, \"\n                                    \"is_active, ctime) VALUES (?, ?, ?, ?, ?)\",\n                                    5, \"string\", email_down, \"string\", db_passwd,\n                                    \"int\", is_staff, \"int\", is_active, \"int64\", now);\n\n    g_free (db_passwd);\n    g_free (email_down);\n\n    if (ret < 0)\n        return ret;\n\n    return 0;\n}\n\nint\nccnet_user_manager_remove_emailuser (CcnetUserManager *manager,\n                                     const char *source,\n                                     const char *email)\n{\n    CcnetDB *db = manager->priv->db;\n    int ret;\n\n    seaf_db_statement_query (db,\n                              \"DELETE FROM UserRole WHERE email=?\",\n                              1, \"string\", email);\n\n    if (strcmp (source, \"DB\") == 0) {\n        ret = seaf_db_statement_query (db,\n                                        \"DELETE FROM EmailUser WHERE email=?\",\n                                        1, \"string\", email);\n        return ret;\n    }\n\n    return -1;\n}\n\nstatic gboolean\nget_password (CcnetDBRow *row, void *data)\n{\n    char **p_passwd = data;\n\n    *p_passwd = g_strdup(seaf_db_row_get_column_text (row, 0));\n    return FALSE;\n}\n\nint\nccnet_user_manager_validate_emailuser (CcnetUserManager *manager,\n                                       const char *email,\n                                       const char *passwd)\n{\n    CcnetDB *db = manager->priv->db;\n    int ret = -1;\n    char *sql;\n    char *email_down;\n    char *login_id;\n    char *stored_passwd = NULL;\n    gboolean need_upgrade = FALSE;\n\n    /* Users with password \"!\" are for internal book keeping only. 
*/\n    if (g_strcmp0 (passwd, \"!\") == 0)\n        return -1;\n\n    login_id = ccnet_user_manager_get_login_id (manager, email);\n    if (!login_id) {\n        ccnet_warning (\"Failed to get login_id for %s\\n\", email);\n        return -1;\n    }\n\n    sql = \"SELECT passwd FROM EmailUser WHERE email=?\";\n    if (seaf_db_statement_foreach_row (db, sql,\n                                        get_password, &stored_passwd,\n                                        1, \"string\", login_id) > 0) {\n        if (validate_passwd (passwd, stored_passwd, &need_upgrade)) {\n            if (need_upgrade)\n                update_user_passwd (manager, login_id, passwd);\n            ret = 0;\n            goto out;\n        } else {\n            goto out;\n        }\n    }\n\n    email_down = g_ascii_strdown (email, strlen(login_id));\n    if (seaf_db_statement_foreach_row (db, sql,\n                                        get_password, &stored_passwd,\n                                        1, \"string\", email_down) > 0) {\n        g_free (email_down);\n        if (validate_passwd (passwd, stored_passwd, &need_upgrade)) {\n            if (need_upgrade)\n                update_user_passwd (manager, login_id, passwd);\n            ret = 0;\n            goto out;\n        } else {\n            goto out;\n        }\n    }\n    g_free (email_down);\n\nout:\n\n    g_free (login_id);\n    g_free (stored_passwd);\n\n    return ret;\n}\n\nstatic gboolean\nget_emailuser_cb (CcnetDBRow *row, void *data)\n{\n    CcnetEmailUser **p_emailuser = data;\n\n    int id = seaf_db_row_get_column_int (row, 0);\n    const char *email = (const char *)seaf_db_row_get_column_text (row, 1);\n    int is_staff = seaf_db_row_get_column_int (row, 2);\n    int is_active = seaf_db_row_get_column_int (row, 3);\n    gint64 ctime = seaf_db_row_get_column_int64 (row, 4);\n    const char *password = seaf_db_row_get_column_text (row, 5);\n    const char *reference_id = seaf_db_row_get_column_text (row, 6);\n 
   const char *role = seaf_db_row_get_column_text (row, 7);\n\n    char *email_l = g_ascii_strdown (email, -1);\n    *p_emailuser = g_object_new (CCNET_TYPE_EMAIL_USER,\n                                 \"id\", id,\n                                 \"email\", email_l,\n                                 \"is_staff\", is_staff,\n                                 \"is_active\", is_active,\n                                 \"ctime\", ctime,\n                                 \"source\", \"DB\",\n                                 \"password\", password,\n                                 \"reference_id\", reference_id,\n                                 \"role\", role ? role : \"\",\n                                 NULL);\n    g_free (email_l);\n\n    return FALSE;\n}\n\nstatic char*\nccnet_user_manager_get_role_emailuser (CcnetUserManager *manager,\n                                     const char* email);\n\nstatic CcnetEmailUser*\nget_emailuser (CcnetUserManager *manager,\n               const char *email,\n               gboolean import,\n               GError **error)\n{\n    CcnetDB *db = manager->priv->db;\n    char *sql;\n    CcnetEmailUser *emailuser = NULL;\n    char *email_down;\n    int rc;\n\n    sql = \"SELECT e.id, e.email, is_staff, is_active, ctime, passwd, reference_id, role \"\n        \" FROM EmailUser e LEFT JOIN UserRole ON e.email = UserRole.email \"\n        \" WHERE e.email=?\";\n    rc = seaf_db_statement_foreach_row (db, sql, get_emailuser_cb, &emailuser,\n                                         1, \"string\", email);\n    if (rc > 0) {\n        return emailuser;\n    } else if (rc < 0) {\n        if (error) {\n            g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, \"Database error\");\n        }\n        return NULL;\n    }\n\n    email_down = g_ascii_strdown (email, strlen(email));\n    rc = seaf_db_statement_foreach_row (db, sql, get_emailuser_cb, &emailuser,\n                                         1, \"string\", email_down);\n    
if (rc > 0) {\n        g_free (email_down);\n        return emailuser;\n    } else if (rc < 0) {\n        if (error) {\n            g_set_error (error, CCNET_DOMAIN, CCNET_ERR_INTERNAL, \"Database error\");\n        }\n        g_free (email_down);\n        return NULL;\n    }\n\n    g_free (email_down);\n\n    return NULL;\n\n}\n\nCcnetEmailUser*\nccnet_user_manager_get_emailuser (CcnetUserManager *manager,\n                                  const char *email,\n                                  GError **error)\n{\n    return get_emailuser (manager, email, FALSE, error);\n}\n\nCcnetEmailUser*\nccnet_user_manager_get_emailuser_with_import (CcnetUserManager *manager,\n                                              const char *email,\n                                              GError **error)\n{\n    return get_emailuser (manager, email, TRUE, error);\n}\n\nCcnetEmailUser*\nccnet_user_manager_get_emailuser_by_id (CcnetUserManager *manager, int id)\n{\n    CcnetDB *db = manager->priv->db;\n    char *sql;\n    CcnetEmailUser *emailuser = NULL;\n\n    sql = \"SELECT e.id, e.email, is_staff, is_active, ctime, passwd, reference_id, role \"\n        \" FROM EmailUser e LEFT JOIN UserRole ON e.email = UserRole.email \"\n        \" WHERE e.id=?\";\n    if (seaf_db_statement_foreach_row (db, sql, get_emailuser_cb, &emailuser,\n                                        1, \"int\", id) < 0)\n        return NULL;\n\n    return emailuser;\n}\n\nstatic gboolean\nget_emailusers_cb (CcnetDBRow *row, void *data)\n{\n    GList **plist = data;\n    CcnetEmailUser *emailuser;\n\n    int id = seaf_db_row_get_column_int (row, 0);\n    const char *email = (const char *)seaf_db_row_get_column_text (row, 1);\n    int is_staff = seaf_db_row_get_column_int (row, 2);\n    int is_active = seaf_db_row_get_column_int (row, 3);\n    gint64 ctime = seaf_db_row_get_column_int64 (row, 4);\n    const char *role = (const char *)seaf_db_row_get_column_text (row, 5);\n    const char *password = 
seaf_db_row_get_column_text (row, 6);\n\n    char *email_l = g_ascii_strdown (email, -1);\n    emailuser = g_object_new (CCNET_TYPE_EMAIL_USER,\n                              \"id\", id,\n                              \"email\", email_l,\n                              \"is_staff\", is_staff,\n                              \"is_active\", is_active,\n                              \"ctime\", ctime,\n                              \"role\", role ? role : \"\",\n                              \"source\", \"DB\",\n                              \"password\", password,\n                              NULL);\n    g_free (email_l);\n\n    *plist = g_list_prepend (*plist, emailuser);\n\n    return TRUE;\n}\n\nGList*\nccnet_user_manager_get_emailusers (CcnetUserManager *manager,\n                                   const char *source,\n                                   int start, int limit,\n                                   const char *status)\n{\n    CcnetDB *db = manager->priv->db;\n    const char *status_condition = \"\";\n    char *sql = NULL;\n    GList *ret = NULL;\n    int rc;\n\n    if (g_strcmp0 (source, \"DB\") != 0)\n        return NULL;\n\n    if (start == -1 && limit == -1) {\n        if (g_strcmp0(status, \"active\") == 0)\n            status_condition = \"WHERE t1.is_active = 1\";\n        else if (g_strcmp0(status, \"inactive\") == 0)\n            status_condition = \"WHERE t1.is_active = 0\";\n\n        sql = g_strdup_printf (\"SELECT t1.id, t1.email, \"\n                               \"t1.is_staff, t1.is_active, t1.ctime, \"\n                               \"t2.role, t1.passwd FROM EmailUser t1 \"\n                               \"LEFT JOIN UserRole t2 \"\n                               \"ON t1.email = t2.email %s \"\n                               \"WHERE t1.email NOT LIKE '%%@seafile_group'\",\n                               status_condition);\n\n        rc = seaf_db_statement_foreach_row (db,\n                                             sql,\n             
                                get_emailusers_cb, &ret,\n                                             0);\n        g_free (sql);\n    } else {\n        if (g_strcmp0(status, \"active\") == 0)\n            status_condition = \"WHERE t1.is_active = 1\";\n        else if (g_strcmp0(status, \"inactive\") == 0)\n            status_condition = \"WHERE t1.is_active = 0\";\n\n        sql = g_strdup_printf (\"SELECT t1.id, t1.email, \"\n                               \"t1.is_staff, t1.is_active, t1.ctime, \"\n                               \"t2.role, t1.passwd FROM EmailUser t1 \"\n                               \"LEFT JOIN UserRole t2 \"\n                               \"ON t1.email = t2.email %s \"\n                               \"WHERE t1.email NOT LIKE '%%@seafile_group' \"\n                               \"ORDER BY t1.id LIMIT ? OFFSET ?\",\n                               status_condition);\n\n        rc = seaf_db_statement_foreach_row (db,\n                                             sql,\n                                             get_emailusers_cb, &ret,\n                                             2, \"int\", limit, \"int\", start);\n        g_free (sql);\n    }\n\n    if (rc < 0) {\n        while (ret != NULL) {\n            g_object_unref (ret->data);\n            ret = g_list_delete_link (ret, ret);\n        }\n        return NULL;\n    }\n\n    return g_list_reverse (ret);\n}\n\nGList*\nccnet_user_manager_search_emailusers (CcnetUserManager *manager,\n                                      const char *source,\n                                      const char *keyword,\n                                      int start, int limit)\n{\n    CcnetDB *db = manager->priv->db;\n    GList *ret = NULL;\n    int rc;\n    char *db_patt = g_strdup_printf (\"%%%s%%\", keyword);\n\n    if (strcmp (source, \"DB\") != 0) {\n        g_free (db_patt);\n        return NULL;\n    }\n\n    if (start == -1 && limit == -1)\n        rc = seaf_db_statement_foreach_row (db,\n         
                                    \"SELECT t1.id, t1.email, \"\n                                             \"t1.is_staff, t1.is_active, t1.ctime, \"\n                                             \"t2.role, t1.passwd FROM EmailUser t1 \"\n                                             \"LEFT JOIN UserRole t2 \"\n                                             \"ON t1.email = t2.email \"\n                                             \"WHERE t1.Email LIKE ? \"\n                                             \"AND t1.email NOT LIKE '%%@seafile_group' \"\n                                             \"ORDER BY t1.id\",\n                                             get_emailusers_cb, &ret,\n                                             1, \"string\", db_patt);\n    else\n        rc = seaf_db_statement_foreach_row (db,\n                                             \"SELECT t1.id, t1.email, \"\n                                             \"t1.is_staff, t1.is_active, t1.ctime, \"\n                                             \"t2.role, t1.passwd FROM EmailUser t1 \"\n                                             \"LEFT JOIN UserRole t2 \"\n                                             \"ON t1.email = t2.email \"\n                                             \"WHERE t1.Email LIKE ? \"\n                                             \"AND t1.email NOT LIKE '%%@seafile_group' \"\n                                             \"ORDER BY t1.id LIMIT ? 
OFFSET ?\",\n                                             get_emailusers_cb, &ret,\n                                             3, \"string\", db_patt,\n                                             \"int\", limit, \"int\", start);\n    g_free (db_patt);\n    if (rc < 0) {\n        while (ret != NULL) {\n            g_object_unref (ret->data);\n            ret = g_list_delete_link (ret, ret);\n        }\n        return NULL;\n    }\n\n    return g_list_reverse (ret);\n}\n\ngint64\nccnet_user_manager_count_emailusers (CcnetUserManager *manager, const char *source)\n{\n    CcnetDB* db = manager->priv->db;\n    char sql[512];\n    gint64 ret;\n\n    if (g_strcmp0 (source, \"DB\") != 0)\n        return -1;\n\n    snprintf (sql, 512, \"SELECT COUNT(id) FROM EmailUser WHERE is_active = 1\");\n\n    ret = seaf_db_get_int64 (db, sql);\n    if (ret < 0)\n        return -1;\n    return ret;\n}\n\ngint64\nccnet_user_manager_count_inactive_emailusers (CcnetUserManager *manager, const char *source)\n{\n    CcnetDB* db = manager->priv->db;\n    char sql[512];\n    gint64 ret;\n\n    if (g_strcmp0 (source, \"DB\") != 0)\n        return -1;\n\n    snprintf (sql, 512, \"SELECT COUNT(id) FROM EmailUser WHERE is_active = 0\");\n\n    ret = seaf_db_get_int64 (db, sql);\n    if (ret < 0)\n        return -1;\n    return ret;\n}\n\n#if 0\nGList*\nccnet_user_manager_filter_emailusers_by_emails(CcnetUserManager *manager,\n                                               const char *emails)\n{\n    CcnetDB *db = manager->priv->db;\n    char *copy = g_strdup (emails), *saveptr;\n    GList *ret = NULL;\n\n    GString *sql = g_string_new(NULL);\n\n    g_string_append (sql, \"SELECT * FROM EmailUser WHERE Email IN (\");\n    char *name = strtok_r (copy, \", \", &saveptr);\n    while (name != NULL) {\n        g_string_append_printf (sql, \"'%s',\", name);\n        name = strtok_r (NULL, \", \", &saveptr);\n    }\n    g_string_erase (sql, sql->len-1, 1); /* remove last \",\" */\n    g_string_append 
(sql, \")\");\n\n    if (seaf_db_foreach_selected_row (db, sql->str, get_emailusers_cb,\n        &ret) < 0) {\n        while (ret != NULL) {\n            g_object_unref (ret->data);\n            ret = g_list_delete_link (ret, ret);\n        }\n        return NULL;\n    }\n\n    g_free (copy);\n    g_string_free (sql, TRUE);\n\n    return g_list_reverse (ret);\n}\n#endif\n\nint\nccnet_user_manager_update_emailuser (CcnetUserManager *manager,\n                                     const char *source,\n                                     int id, const char* passwd,\n                                     int is_staff, int is_active)\n{\n    CcnetDB* db = manager->priv->db;\n    char *db_passwd = NULL;\n\n    // in case set user user1 to inactive, then add another active user user2,\n    // if current user num already the max user num,\n    // then reset user1 to active should fail\n    if (is_active && !check_user_number (manager, FALSE)) {\n        return -1;\n    }\n\n    if (strcmp (source, \"DB\") == 0) {\n        if (g_strcmp0 (passwd, \"!\") == 0) {\n            /* Don't update passwd if it starts with '!' */\n            return seaf_db_statement_query (db, \"UPDATE EmailUser SET is_staff=?, \"\n                                             \"is_active=? WHERE id=?\",\n                                             3, \"int\", is_staff, \"int\", is_active,\n                                             \"int\", id);\n        } else {\n            hash_password_pbkdf2_sha256 (passwd, manager->passwd_hash_iter, &db_passwd);\n\n            return seaf_db_statement_query (db, \"UPDATE EmailUser SET passwd=?, \"\n                                             \"is_staff=?, is_active=? 
WHERE id=?\",\n                                             4, \"string\", db_passwd, \"int\", is_staff,\n                                             \"int\", is_active, \"int\", id);\n        }\n    }\n\n    return -1;\n}\n\nstatic gboolean\nget_role_emailuser_cb (CcnetDBRow *row, void *data)\n{\n    *((char **)data) = g_strdup (seaf_db_row_get_column_text (row, 0));\n\n    return FALSE;\n}\n\nstatic char*\nccnet_user_manager_get_role_emailuser (CcnetUserManager *manager,\n                                     const char* email)\n{\n\n    CcnetDB *db = manager->priv->db;\n    const char *sql;\n    char* role;\n\n    sql = \"SELECT role FROM UserRole WHERE email=?\";\n    if (seaf_db_statement_foreach_row (db, sql, get_role_emailuser_cb, &role,\n                                        1, \"string\", email) > 0)\n        return role;\n\n    return NULL;\n}\n\nint\nccnet_user_manager_update_role_emailuser (CcnetUserManager *manager,\n                                     const char* email, const char* role)\n{\n    CcnetDB* db = manager->priv->db;\n    char *old_role = ccnet_user_manager_get_role_emailuser (manager, email);\n    if (old_role) {\n        g_free (old_role);\n        return seaf_db_statement_query (db, \"UPDATE UserRole SET role=? 
\"\n                                         \"WHERE email=?\",\n                                         2, \"string\", role, \"string\", email);\n    } else\n        return seaf_db_statement_query (db, \"INSERT INTO UserRole(role, email)\"\n                                         \" VALUES (?, ?)\",\n                                         2, \"string\", role, \"string\", email);\n}\n\nGList*\nccnet_user_manager_get_superusers(CcnetUserManager *manager)\n{\n    CcnetDB* db = manager->priv->db;\n    GList *ret = NULL;\n    char sql[512];\n\n    snprintf (sql, 512,\n              \"SELECT t1.id, t1.email, \"\n              \"t1.is_staff, t1.is_active, t1.ctime, \"\n              \"t2.role, t1.passwd FROM EmailUser t1 \"\n              \"LEFT JOIN UserRole t2 \"\n              \"ON t1.email = t2.email \"\n              \"WHERE is_staff = 1 AND t1.email NOT LIKE '%%@seafile_group';\");\n\n    if (seaf_db_foreach_selected_row (db, sql, get_emailusers_cb, &ret) < 0) {\n        while (ret != NULL) {\n            g_object_unref (ret->data);\n            ret = g_list_delete_link (ret, ret);\n        }\n        return NULL;\n    }\n\n    return g_list_reverse (ret);\n}\n\nchar *\nccnet_user_manager_get_login_id (CcnetUserManager *manager, const char *primary_id)\n{\n    return g_strdup (primary_id);\n}\n\nGList *\nccnet_user_manager_get_emailusers_in_list (CcnetUserManager *manager,\n                                           const char *source,\n                                           const char *user_list,\n                                           GError **error)\n{\n    int i;\n    const char *username;\n    json_t *j_array = NULL, *j_obj;\n    json_error_t j_error;\n    GList *ret = NULL;\n    const char *args[20];\n\n    j_array = json_loadb (user_list, strlen(user_list), 0, &j_error);\n    if (!j_array) {\n        g_set_error (error, CCNET_DOMAIN, 0, \"Bad args.\");\n        return NULL;\n    }\n    /* Query 20 users at most. 
*/\n    size_t user_num = json_array_size (j_array);\n    if (user_num > 20) {\n        g_set_error (error, CCNET_DOMAIN, 0, \"Number of users exceeds 20.\");\n        json_decref (j_array);\n        return NULL;\n    }\n    GString *sql = g_string_new (\"\");\n    for (i = 0; i < 20; i++) {\n        if (i < user_num) {\n            j_obj = json_array_get (j_array, i);\n            username = json_string_value(j_obj);\n            args[i] = username;\n        } else {\n            args[i] = \"\";\n        }\n    }\n\n    if (strcmp (source, \"DB\") != 0)\n        goto out;\n\n    g_string_printf (sql, \"SELECT e.id, e.email, is_staff, is_active, ctime, \"\n                          \"role, passwd FROM EmailUser e \"\n                          \"LEFT JOIN UserRole r ON e.email = r.email \"\n                          \"WHERE e.email IN (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)\");\n\n    if (seaf_db_statement_foreach_row (manager->priv->db, sql->str, get_emailusers_cb, &ret, 20,\n                                        \"string\", args[0], \"string\", args[1], \"string\", args[2],\n                                        \"string\", args[3], \"string\", args[4], \"string\", args[5],\n                                        \"string\", args[6], \"string\", args[7], \"string\", args[8],\n                                        \"string\", args[9], \"string\", args[10], \"string\", args[11],\n                                        \"string\", args[12], \"string\", args[13], \"string\", args[14],\n                                        \"string\", args[15], \"string\", args[16], \"string\", args[17],\n                                        \"string\", args[18], \"string\", args[19]) < 0)\n        ccnet_warning(\"Failed to get users in list %s.\\n\", user_list);\n\nout:\n    json_decref (j_array);\n    g_string_free (sql, TRUE);\n\n    return ret;\n}\n\nint\nccnet_user_manager_update_emailuser_id (CcnetUserManager *manager,\n                                        
const char *old_email,\n                                        const char *new_email,\n                                        GError **error)\n{\n    int ret = -1;\n    int rc;\n    GString *sql = g_string_new (\"\");\n\n    //1.update RepoOwner\n    g_string_printf (sql, \"UPDATE RepoOwner SET owner_id=? WHERE owner_id=?\");\n    rc = seaf_db_statement_query (seaf->db, sql->str, 2,\n                                  \"string\", new_email,\n                                  \"string\", old_email);\n    if (rc < 0){\n        ccnet_warning (\"Failed to update repo owner\\n\");\n        goto out;\n    }\n\n    //2.update SharedRepo\n    g_string_printf (sql, \"UPDATE SharedRepo SET from_email=? WHERE from_email=?\");\n    rc = seaf_db_statement_query (seaf->db, sql->str, 2,\n                                  \"string\", new_email,\n                                  \"string\", old_email);\n    if (rc < 0){\n        ccnet_warning (\"Failed to update from_email\\n\");\n        goto out;\n    }\n\n    g_string_printf (sql, \"UPDATE SharedRepo SET to_email=? WHERE to_email=?\");\n    rc = seaf_db_statement_query (seaf->db, sql->str, 2,\n                                  \"string\", new_email,\n                                  \"string\", old_email);\n    if (rc < 0){\n        ccnet_warning (\"Failed to update to_email\\n\");\n        goto out;\n    }\n\n    //3.update GroupUser\n    rc = ccnet_group_manager_update_group_user (seaf->group_mgr, old_email, new_email);\n    if (rc < 0){\n        ccnet_warning (\"Failed to update group member\\n\");\n        goto out;\n    }\n\n    //4.update RepoUserToken\n    g_string_printf (sql, \"UPDATE RepoUserToken SET email=? 
WHERE email=?\");\n    rc = seaf_db_statement_query (seaf->db, sql->str, 2,\n                                  \"string\", new_email,\n                                  \"string\", old_email);\n    if (rc < 0){\n        ccnet_warning (\"Failed to update repo user token\\n\");\n        goto out;\n    }\n\n    //5.uptede FolderUserPerm\n    g_string_printf (sql, \"UPDATE FolderUserPerm SET user=? WHERE user=?\");\n    rc = seaf_db_statement_query (seaf->db, sql->str, 2,\n                                  \"string\", new_email,\n                                  \"string\", old_email);\n    if (rc < 0){\n        ccnet_warning (\"Failed to update user folder permission\\n\");\n        goto out;\n    }\n\n    //6.update EmailUser\n    g_string_printf (sql, \"UPDATE EmailUser SET email=? WHERE email=?\");\n    rc = seaf_db_statement_query (manager->priv->db, sql->str, 2,\n                                  \"string\", new_email,\n                                  \"string\", old_email);\n    if (rc < 0){\n        ccnet_warning (\"Failed to update email user\\n\");\n        goto out;\n    }\n\n    //7.update UserQuota\n    g_string_printf (sql, \"UPDATE UserQuota SET user=? WHERE user=?\");\n    rc = seaf_db_statement_query (seaf->db, sql->str, 2,\n                                  \"string\", new_email,\n                                  \"string\", old_email);\n    if (rc < 0){\n        ccnet_warning (\"Failed to update user quota\\n\");\n        goto out;\n    }\n\n    ret = 0;\nout:\n    g_string_free (sql, TRUE);\n    return ret;\n}\n"
  },
  {
    "path": "common/user-mgr.h",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#ifndef CCNET_USER_MGR_H\n#define CCNET_USER_MGR_H\n\n#include <glib.h>\n#include <glib-object.h>\n\n#define CCNET_TYPE_USER_MANAGER                  (ccnet_user_manager_get_type ())\n#define CCNET_USER_MANAGER(obj)                  (G_TYPE_CHECK_INSTANCE_CAST ((obj), CCNET_TYPE_USER_MANAGER, CcnetUserManager))\n#define CCNET_IS_USER_MANAGER(obj)               (G_TYPE_CHECK_INSTANCE_TYPE ((obj), CCNET_TYPE_USER_MANAGER))\n#define CCNET_USER_MANAGER_CLASS(klass)          (G_TYPE_CHECK_CLASS_CAST ((klass), CCNET_TYPE_USER_MANAGER, CcnetUserManagerClass))\n#define CCNET_IS_USER_MANAGER_CLASS(klass)       (G_TYPE_CHECK_CLASS_TYPE ((klass), CCNET_TYPE_USER_MANAGER))\n#define CCNET_USER_MANAGER_GET_CLASS(obj)        (G_TYPE_INSTANCE_GET_CLASS ((obj), CCNET_TYPE_USER_MANAGER, CcnetUserManagerClass))\n\n\ntypedef struct _SeafileSession SeafileSession;\ntypedef struct _CcnetUserManager CcnetUserManager;\ntypedef struct _CcnetUserManagerClass CcnetUserManagerClass;\n\ntypedef struct CcnetUserManagerPriv CcnetUserManagerPriv;\n\n\nstruct _CcnetUserManager\n{\n    GObject         parent_instance;\n\n    SeafileSession   *session;\n    \n    char           *userdb_path;\n    GHashTable     *user_hash;\n\n#ifdef HAVE_LDAP\n    /* LDAP related */\n    gboolean        use_ldap;\n    char           *ldap_host;\n#ifdef WIN32\n    gboolean        use_ssl;\n#endif\n    char           **base_list;  /* base DN from where all users can be reached */\n    char           *filter;     /* Additional search filter */\n    char           *user_dn;    /* DN of the admin user */\n    char           *password;   /* password for admin user */\n    char           *login_attr;  /* attribute name used for login */\n    gboolean        follow_referrals; /* Follow referrals returned by the server. 
*/\n#endif\n\n    int passwd_hash_iter;\n\n    CcnetUserManagerPriv *priv;\n};\n\nstruct _CcnetUserManagerClass\n{\n    GObjectClass    parent_class;\n};\n\nGType ccnet_user_manager_get_type  (void);\n\nCcnetUserManager* ccnet_user_manager_new (SeafileSession *);\nint ccnet_user_manager_prepare (CcnetUserManager *manager);\n\nvoid ccnet_user_manager_free (CcnetUserManager *manager);\n\nvoid ccnet_user_manager_start (CcnetUserManager *manager);\n\nvoid\nccnet_user_manager_set_max_users (CcnetUserManager *manager, gint64 max_users);\n\nint\nccnet_user_manager_add_emailuser (CcnetUserManager *manager,\n                                  const char *email,\n                                  const char *encry_passwd,\n                                  int is_staff, int is_active);\n\nint\nccnet_user_manager_remove_emailuser (CcnetUserManager *manager,\n                                     const char *source,\n                                     const char *email);\n\nint\nccnet_user_manager_validate_emailuser (CcnetUserManager *manager,\n                                       const char *email,\n                                       const char *passwd);\n\nCcnetEmailUser*\nccnet_user_manager_get_emailuser (CcnetUserManager *manager, const char *email, GError **error);\n\nCcnetEmailUser*\nccnet_user_manager_get_emailuser_with_import (CcnetUserManager *manager,\n                                              const char *email,\n                                              GError **error);\nCcnetEmailUser*\nccnet_user_manager_get_emailuser_by_id (CcnetUserManager *manager, int id);\n\n/*\n * @source: \"DB\" or \"LDAP\".\n * @status: \"\", \"active\", or \"inactive\". 
returns all users when this argument is \"\".\n */\nGList*\nccnet_user_manager_get_emailusers (CcnetUserManager *manager,\n                                   const char *source,\n                                   int start, int limit,\n                                   const char *status);\n\nGList*\nccnet_user_manager_search_emailusers (CcnetUserManager *manager,\n                                      const char *source,\n                                      const char *keyword,\n                                      int start, int limit);\n\ngint64\nccnet_user_manager_count_emailusers (CcnetUserManager *manager, const char *source);\n\ngint64\nccnet_user_manager_count_inactive_emailusers (CcnetUserManager *manager, const char *source);\n\nGList*\nccnet_user_manager_filter_emailusers_by_emails(CcnetUserManager *manager,\n                                               const char *emails);\n\nint\nccnet_user_manager_update_emailuser (CcnetUserManager *manager,\n                                     const char *source,\n                                     int id, const char* passwd,\n                                     int is_staff, int is_active);\n\nint\nccnet_user_manager_update_role_emailuser (CcnetUserManager *manager,\n                                     const char* email, const char* role);\n\nGList*\nccnet_user_manager_get_superusers(CcnetUserManager *manager);\n\n/* Remove one specific peer-id binding to an email */\n\nchar *\nccnet_user_manager_get_login_id (CcnetUserManager *manager,\n                                 const char *primary_id);\n\nGList *\nccnet_user_manager_get_emailusers_in_list (CcnetUserManager *manager,\n                                           const char *source,\n                                           const char *user_list,\n                                           GError **error);\n\nint\nccnet_user_manager_update_emailuser_id (CcnetUserManager *manager,\n                                        const char *old_email,\n    
                                    const char *new_email,\n                                        GError **error);\n#endif\n"
  },
  {
    "path": "common/vc-common.c",
    "content": "#include \"common.h\"\n\n#include \"seafile-session.h\"\n#include \"vc-common.h\"\n\n#include \"log.h\"\n#include \"seafile-error.h\"\n\nstatic GList *\nmerge_bases_many (SeafCommit *one, int n, SeafCommit **twos);\n\nstatic gint\ncompare_commit_by_time (gconstpointer a, gconstpointer b, gpointer unused)\n{\n    const SeafCommit *commit_a = a;\n    const SeafCommit *commit_b = b;\n\n    /* Latest commit comes first in the list. */\n    return (commit_b->ctime - commit_a->ctime);\n}\n\nstatic gint\ncompare_commit (gconstpointer a, gconstpointer b)\n{\n    const SeafCommit *commit_a = a;\n    const SeafCommit *commit_b = b;\n\n    return strcmp (commit_a->commit_id, commit_b->commit_id);\n}\n\nstatic gboolean\nadd_to_commit_hash (SeafCommit *commit, void *vhash, gboolean *stop)\n{\n    GHashTable *hash = vhash;\n\n    char *key = g_strdup (commit->commit_id);\n    g_hash_table_replace (hash, key, key);\n\n    return TRUE;\n}\n\nstatic GHashTable *\ncommit_tree_to_hash (SeafCommit *head)\n{\n    GHashTable *hash;\n    gboolean res;\n\n    hash = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL);\n\n    res = seaf_commit_manager_traverse_commit_tree (seaf->commit_mgr,\n                                                    head->repo_id,\n                                                    head->version,\n                                                    head->commit_id,\n                                                    add_to_commit_hash,\n                                                    hash, FALSE);\n    if (!res)\n        goto fail;\n\n    return hash;\n\nfail:\n    g_hash_table_destroy (hash);\n    return NULL;\n}\n\nstatic GList *\nget_independent_commits (GList *commits)\n{\n    SeafCommit **rslt;\n    GList *list, *result;\n    int cnt, i, j;\n    SeafCommit *c;\n\n    g_debug (\"Get independent commits.\\n\");\n\n    cnt = g_list_length (commits);\n\n    rslt = calloc(cnt, sizeof(*rslt));\n    for (list = commits, i = 0; list; 
list = list->next)\n        rslt[i++] = list->data;\n    g_list_free (commits);\n\n    for (i = 0; i < cnt - 1; i++) {\n        for (j = i+1; j < cnt; j++) {\n            if (!rslt[i] || !rslt[j])\n                continue;\n            result = merge_bases_many(rslt[i], 1, &rslt[j]);\n            for (list = result; list; list = list->next) {\n                c = list->data;\n                /* If two commits have fast-forward relationship,\n                 * drop the older one.\n                 */\n                if (strcmp (rslt[i]->commit_id, c->commit_id) == 0) {\n                    seaf_commit_unref (rslt[i]);\n                    rslt[i] = NULL;\n                }\n                if (strcmp (rslt[j]->commit_id, c->commit_id) == 0) {\n                    seaf_commit_unref (rslt[j]);\n                    rslt[j] = NULL;\n                }\n                seaf_commit_unref (c);\n            }\n        }\n    }\n\n    /* Surviving ones in rslt[] are the independent results */\n    result = NULL;\n    for (i = 0; i < cnt; i++) {\n        if (rslt[i])\n            result = g_list_insert_sorted_with_data (result, rslt[i],\n                                                     compare_commit_by_time,\n                                                     NULL);\n    }\n    free(rslt);\n    return result;\n}\n\ntypedef struct {\n    GList *result;\n    GHashTable *commit_hash;\n} MergeTraverseData;\n\nstatic gboolean\nget_merge_bases (SeafCommit *commit, void *vdata, gboolean *stop)\n{\n    MergeTraverseData *data = vdata;\n\n    /* Found a common ancestor.\n     * Dont traverse its parenets.\n     */\n    if (g_hash_table_lookup (data->commit_hash, commit->commit_id)) {\n        if (!g_list_find_custom (data->result, commit, compare_commit)) {\n            data->result = g_list_insert_sorted_with_data (data->result, commit,\n                                                     compare_commit_by_time,\n                                                     NULL);\n 
           seaf_commit_ref (commit);\n        }\n        *stop = TRUE;\n    }\n\n    return TRUE;\n}\n\n/*\n * Merge \"one\" with commits in \"twos\".\n * The ancestors returned may not be ancestors for all the input commits.\n * They are common ancestors for one and some commits in twos array.\n */\nstatic GList *\nmerge_bases_many (SeafCommit *one, int n, SeafCommit **twos)\n{\n    GHashTable *commit_hash;\n    GList *result = NULL;\n    SeafCommit *commit;\n    int i;\n    MergeTraverseData data;\n    gboolean res;\n\n    for (i = 0; i < n; i++) {\n        if (one == twos[i])\n            return g_list_append (result, one);\n    }\n\n    /* First construct a hash table of all commit ids rooted at one. */\n    commit_hash = commit_tree_to_hash (one);\n    if (!commit_hash) {\n        g_warning (\"Failed to load commit hash.\\n\");\n        return NULL;\n    }\n\n    data.commit_hash = commit_hash;\n    data.result = NULL;\n\n    for (i = 0; i < n; i++) {\n        res = seaf_commit_manager_traverse_commit_tree (seaf->commit_mgr,\n                                                        twos[i]->repo_id,\n                                                        twos[i]->version,\n                                                        twos[i]->commit_id,\n                                                        get_merge_bases,\n                                                        &data, FALSE);\n        if (!res)\n            goto fail;\n    }\n\n    g_hash_table_destroy (commit_hash);\n    result = data.result;\n\n    if (!result || !result->next)\n        return result;\n\n    /* There are more than one. Try to find out independent ones. 
*/\n    result = get_independent_commits (result);\n\n    return result;\n\nfail:\n    result = data.result;\n    while (result) {\n        commit = result->data;\n        seaf_commit_unref (commit);\n        result = g_list_delete_link (result, result);\n    }\n    g_hash_table_destroy (commit_hash);\n    return NULL;\n}\n\n/*\n * Returns common ancesstor for two branches.\n * Any two commits should have a common ancestor.\n * So returning NULL indicates an error, for e.g. corupt commit.\n */\nSeafCommit *\nget_merge_base (SeafCommit *head, SeafCommit *remote)\n{\n    GList *result, *iter;\n    SeafCommit *one, **twos;\n    int n, i;\n    SeafCommit *ret = NULL;\n\n    one = head;\n    twos = (SeafCommit **) calloc (1, sizeof(SeafCommit *));\n    twos[0] = remote;\n    n = 1;\n    result = merge_bases_many (one, n, twos);\n    free (twos);\n    if (!result || !result->next)\n        goto done;\n\n    /*\n     * More than one common ancestors.\n     * Loop until the oldest common ancestor is found.\n     */\n    while (1) {\n        n = g_list_length (result) - 1;\n        one = result->data;\n        twos = calloc (n, sizeof(SeafCommit *));\n        for (iter = result->next, i = 0; i < n; iter = iter->next, i++) {\n            twos[i] = iter->data;\n        }\n        g_list_free (result);\n\n        result = merge_bases_many (one, n, twos);\n        free (twos);\n        if (!result || !result->next)\n            break;\n    }\n\ndone:\n    if (result)\n        ret = result->data;\n    g_list_free (result);\n\n    return ret;\n}\n\n/*\n * Returns true if src_head is ahead of dst_head.\n */\ngboolean\nis_fast_forward (const char *repo_id, int version,\n                 const char *src_head, const char *dst_head)\n{\n    VCCompareResult res;\n\n    res = vc_compare_commits (repo_id, version, src_head, dst_head);\n\n    return (res == VC_FAST_FORWARD);\n}\n\nVCCompareResult\nvc_compare_commits (const char *repo_id, int version,\n                    const char *c1, 
const char *c2)\n{\n    SeafCommit *commit1, *commit2, *ca;\n    VCCompareResult ret;\n\n    /* Treat the same as up-to-date. */\n    if (strcmp (c1, c2) == 0)\n        return VC_UP_TO_DATE;\n\n    commit1 = seaf_commit_manager_get_commit (seaf->commit_mgr, repo_id, version, c1);\n    if (!commit1)\n        return VC_INDEPENDENT;\n\n    commit2 = seaf_commit_manager_get_commit (seaf->commit_mgr, repo_id, version, c2);\n    if (!commit2) {\n        seaf_commit_unref (commit1);\n        return VC_INDEPENDENT;\n    }\n\n    ca = get_merge_base (commit1, commit2);\n\n    if (!ca)\n        ret = VC_INDEPENDENT;\n    else if (strcmp(ca->commit_id, commit1->commit_id) == 0)\n        ret = VC_UP_TO_DATE;\n    else if (strcmp(ca->commit_id, commit2->commit_id) == 0)\n        ret = VC_FAST_FORWARD;\n    else\n        ret = VC_INDEPENDENT;\n\n    if (ca) seaf_commit_unref (ca);\n    seaf_commit_unref (commit1);\n    seaf_commit_unref (commit2);\n    return ret;\n}\n\n/**\n * Diff a specific file with parent(s).\n * If @commit is a merge, both parents will be compared.\n * @commit must have this file and it's id is given in @file_id.\n * \n * Returns 0 if there is no difference; 1 otherwise.\n * If returns 0, @parent will point to the next commit to traverse.\n * If I/O error occurs, @error will be set.\n */\nstatic int\ndiff_parents_with_path (SeafCommit *commit,\n                        const char *repo_id,\n                        const char *store_id,\n                        int version,\n                        const char *path,\n                        const char *file_id,\n                        char *parent,\n                        GError **error)\n{\n    SeafCommit *p1 = NULL, *p2 = NULL;\n    char *file_id_p1 = NULL, *file_id_p2 = NULL;\n    int ret = 0;\n\n    p1 = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                                         commit->repo_id,\n                                         commit->version,\n                                 
        commit->parent_id);\n    if (!p1) {\n        g_warning (\"Failed to find commit %s.\\n\", commit->parent_id);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, \" \");\n        return 0;\n    }\n\n    if (strcmp (p1->root_id, EMPTY_SHA1) == 0) {\n        seaf_commit_unref (p1);\n        return 1;\n    }\n\n    if (commit->second_parent_id) {\n        p2 = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                                             commit->repo_id,\n                                             commit->version,\n                                             commit->second_parent_id);\n        if (!p2) {\n            g_warning (\"Failed to find commit %s.\\n\", commit->second_parent_id);\n            seaf_commit_unref (p1);\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, \" \");\n            return 0;\n        }\n    }\n\n    if (!p2) {\n        file_id_p1 = seaf_fs_manager_path_to_obj_id (seaf->fs_mgr,\n                                                     store_id,\n                                                     version,\n                                                     p1->root_id, path,\n                                                     NULL,\n                                                     error);\n        if (*error)\n            goto out;\n        if (!file_id_p1 || strcmp (file_id, file_id_p1) != 0)\n            ret = 1;\n        else\n            memcpy (parent, p1->commit_id, 41);\n    } else {\n        file_id_p1 = seaf_fs_manager_path_to_obj_id (seaf->fs_mgr,\n                                                     store_id,\n                                                     version,\n                                                     p1->root_id, path,\n                                                     NULL, error);\n        if (*error)\n            goto out;\n        file_id_p2 = seaf_fs_manager_path_to_obj_id (seaf->fs_mgr,\n                                                   
  store_id,\n                                                     version,\n                                                     p2->root_id, path,\n                                                     NULL, error);\n        if (*error)\n            goto out;\n\n        if (file_id_p1 && file_id_p2) {\n            if (strcmp(file_id, file_id_p1) != 0 &&\n                strcmp(file_id, file_id_p2) != 0)\n                ret = 1;\n            else if (strcmp(file_id, file_id_p1) == 0)\n                memcpy (parent, p1->commit_id, 41);\n            else\n                memcpy (parent, p2->commit_id, 41);\n        } else if (file_id_p1 && !file_id_p2) {\n            if (strcmp(file_id, file_id_p1) != 0)\n                ret = 1;\n            else\n                memcpy (parent, p1->commit_id, 41);\n        } else if (!file_id_p1 && file_id_p2) {\n            if (strcmp(file_id, file_id_p2) != 0)\n                ret = 1;\n            else\n                memcpy (parent, p2->commit_id, 41);\n        } else {\n            ret = 1;\n        }\n    }\n\nout:\n    g_free (file_id_p1);\n    g_free (file_id_p2);\n\n    if (p1)\n        seaf_commit_unref (p1);\n    if (p2)\n        seaf_commit_unref (p2);\n\n    return ret;\n}\n\nstatic int\nget_file_modifier_mtime_v0 (const char *repo_id, const char *store_id, int version,\n                            const char *head, const char *path,\n                            char **modifier, gint64 *mtime)\n{\n    char commit_id[41];\n    SeafCommit *commit = NULL;\n    char *file_id = NULL;\n    int changed;\n    int ret = 0;\n    GError *error = NULL;\n\n    *modifier = NULL;\n    *mtime = 0;\n\n    memcpy (commit_id, head, 41);\n\n    while (1) {\n        commit = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                                                 repo_id, version,\n                                                 commit_id);\n        if (!commit) {\n            ret = -1;\n            break;\n        }\n\n       
 /* We hit the initial commit. */\n        if (!commit->parent_id)\n            break;\n\n        file_id = seaf_fs_manager_path_to_obj_id (seaf->fs_mgr,\n                                                  store_id, version,\n                                                  commit->root_id,\n                                                  path,\n                                                  NULL,\n                                                  &error);\n        if (error) {\n            g_clear_error (&error);\n            ret = -1;\n            break;\n        }\n        /* We expect commit to have this file. */\n        if (!file_id) {\n            ret = -1;\n            break;\n        }\n\n        changed = diff_parents_with_path (commit,\n                                          repo_id, store_id, version,\n                                          path, file_id,\n                                          commit_id, &error);\n        if (error) {\n            g_clear_error (&error);\n            ret = -1;\n            break;\n        }\n\n        if (changed) {\n            *modifier = g_strdup (commit->creator_name);\n            *mtime = commit->ctime;\n            break;\n        } else {\n            /* If this commit doesn't change the file, commit_id will be set\n             * to the parent commit to traverse.\n             */\n            g_free (file_id);\n            seaf_commit_unref (commit);\n        }\n    }\n\n    g_free (file_id);\n    if (commit)\n        seaf_commit_unref (commit);\n    return ret;\n}\n\nstatic int\nget_file_modifier_mtime_v1 (const char *repo_id, const char *store_id, int version,\n                            const char *head, const char *path,\n                            char **modifier, gint64 *mtime)\n{\n    SeafCommit *commit = NULL;\n    SeafDir *dir = NULL;\n    SeafDirent *dent = NULL;\n    int ret = 0;\n\n    commit = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                                       
      repo_id, version,\n                                             head);\n    if (!commit) {\n        seaf_warning (\"Failed to get commit %s.\\n\", head);\n        return -1;\n    }\n\n    char *parent = g_path_get_dirname (path);\n    if (strcmp(parent, \".\") == 0) {\n        g_free (parent);\n        parent = g_strdup(\"\");\n    }\n    char *filename = g_path_get_basename (path);\n\n    dir = seaf_fs_manager_get_seafdir_by_path (seaf->fs_mgr,\n                                               store_id, version,\n                                               commit->root_id,\n                                               parent, NULL);\n    if (!dir) {\n        seaf_warning (\"dir %s doesn't exist in repo %s.\\n\", parent, repo_id);\n        ret = -1;\n        goto out;\n    }\n\n    GList *p;\n    for (p = dir->entries; p; p = p->next) {\n        SeafDirent *d = p->data;\n        if (strcmp (d->name, filename) == 0) {\n            dent = d;\n            break;\n        }\n    }\n\n    if (!dent) {\n        goto out;\n    }\n\n    *modifier = g_strdup(dent->modifier);\n    *mtime = dent->mtime;\n\nout:\n    g_free (parent);\n    g_free (filename);\n    seaf_commit_unref (commit);\n    seaf_dir_free (dir);\n\n    return ret;\n}\n\n/**\n * Get the user who last changed a file and the mtime.\n * @head: head commit to start the search.\n * @path: path of the file.\n */\nint\nget_file_modifier_mtime (const char *repo_id,\n                         const char *store_id,\n                         int version,\n                         const char *head,\n                         const char *path,\n                         char **modifier,\n                         gint64 *mtime)\n{\n    if (version > 0)\n        return get_file_modifier_mtime_v1 (repo_id, store_id, version,\n                                           head, path,\n                                           modifier, mtime);\n    else\n        return get_file_modifier_mtime_v0 (repo_id, store_id, 
version,\n                                           head, path,\n                                           modifier, mtime);\n}\n\nchar *\ngen_conflict_path (const char *origin_path,\n                   const char *modifier,\n                   gint64 mtime)\n{\n    char time_buf[64];\n    time_t t = (time_t)mtime;\n    char *copy = g_strdup (origin_path);\n    GString *conflict_path = g_string_new (NULL);\n    char *dot, *ext;\n\n    strftime(time_buf, 64, \"%Y-%m-%d-%H-%M-%S\", localtime(&t));\n\n    dot = strrchr (copy, '.');\n\n    if (dot != NULL) {\n        *dot = '\\0';\n        ext = dot + 1;\n        if (modifier)\n            g_string_printf (conflict_path, \"%s (SFConflict %s %s).%s\",\n                             copy, modifier, time_buf, ext);\n        else\n            g_string_printf (conflict_path, \"%s (SFConflict %s).%s\",\n                             copy, time_buf, ext);\n    } else {\n        if (modifier)\n            g_string_printf (conflict_path, \"%s (SFConflict %s %s)\",\n                             copy, modifier, time_buf);\n        else\n            g_string_printf (conflict_path, \"%s (SFConflict %s)\",\n                             copy, time_buf);\n    }\n\n    g_free (copy);\n    return g_string_free (conflict_path, FALSE);\n}\n\nchar *\ngen_conflict_path_wrapper (const char *repo_id, int version,\n                           const char *head, const char *in_repo_path,\n                           const char *original_path)\n{\n    char *modifier;\n    gint64 mtime;\n\n    /* XXX: this function is only used in client, so store_id is always\n     * the same as repo_id. This can be changed if it's also called in\n     * server.\n     */\n    if (get_file_modifier_mtime (repo_id, repo_id, version, head, in_repo_path,\n                                 &modifier, &mtime) < 0)\n        return NULL;\n\n    return gen_conflict_path (original_path, modifier, mtime);\n}\n"
  },
  {
    "path": "common/vc-common.h",
    "content": "#ifndef VC_COMMON_H\n#define VC_COMMON_H\n\n#include \"commit-mgr.h\"\n\nSeafCommit *\nget_merge_base (SeafCommit *head, SeafCommit *remote);\n\n/*\n * Returns true if src_head is ahead of dst_head.\n */\ngboolean\nis_fast_forward (const char *repo_id, int version,\n                 const char *src_head, const char *dst_head);\n\ntypedef enum {\n    VC_UP_TO_DATE,\n    VC_FAST_FORWARD,\n    VC_INDEPENDENT,\n} VCCompareResult;\n\n/*\n * Compares commits c1 and c2 as if we were going to merge c1 into c2.\n * \n * Returns:\n * VC_UP_TO_DATE: if c2 is ahead of c1, or c1 == c2;\n * VC_FAST_FORWARD: if c1 is ahead of c2;\n * VC_INDEPENDENT: if c1 and c2 has no inheritent relationship.\n * Returns VC_INDEPENDENT if c1 or c2 doesn't exist.\n */\nVCCompareResult\nvc_compare_commits (const char *repo_id, int version,\n                    const char *c1, const char *c2);\n\nchar *\ngen_conflict_path (const char *original_path,\n                   const char *modifier,\n                   gint64 mtime);\n\nint\nget_file_modifier_mtime (const char *repo_id, const char *store_id, int version,\n                         const char *head, const char *path,\n                         char **modifier, gint64 *mtime);\n\n/* Wrapper around the above two functions */\nchar *\ngen_conflict_path_wrapper (const char *repo_id, int version,\n                           const char *head, const char *in_repo_path,\n                           const char *original_path);\n\n#endif\n"
  },
  {
    "path": "configure.ac",
    "content": "dnl Process this file with autoconf to produce a configure script.\n\n\nAC_PREREQ(2.61)\nAC_INIT([seafile], [6.0.1], [freeplant@gmail.com])\nAC_CONFIG_HEADER([config.h])\n\nAC_CONFIG_MACRO_DIR([m4])\n\nAM_INIT_AUTOMAKE([1.9 foreign])\n\n#AC_MINGW32\nAC_CANONICAL_BUILD\n\ndnl enable the build of share library by default\nAC_ENABLE_SHARED\n\nAC_SUBST(LIBTOOL_DEPS)\n\n# Checks for programs.\nAC_PROG_CC\n#AM_C_PROTOTYPES\nAC_C_CONST\nAC_PROG_MAKE_SET\n# AC_PROG_RANLIB\nLT_INIT\n\n# Checks for headers.\n#AC_CHECK_HEADERS([arpa/inet.h fcntl.h inttypes.h libintl.h limits.h locale.h netdb.h netinet/in.h stdint.h stdlib.h string.h strings.h sys/ioctl.h sys/socket.h sys/time.h termios.h unistd.h utime.h utmp.h])\n\n# Checks for typedefs, structures, and compiler characteristics.\nAC_SYS_LARGEFILE\n\n# Checks for library functions.\n#AC_CHECK_FUNCS([alarm dup2 ftruncate getcwd gethostbyname gettimeofday memmove memset mkdir rmdir select setlocale socket strcasecmp strchr strdup strrchr strstr strtol uname utime strtok_r sendfile])\n\n# check platform\nAC_MSG_CHECKING(for WIN32)\nif test \"$build_os\" = \"mingw32\" -o \"$build_os\" = \"mingw64\"; then\n  bwin32=true\n  AC_MSG_RESULT(compile in mingw)\nelse\n  AC_MSG_RESULT(no)\nfi\n\nAC_MSG_CHECKING(for Mac)\nif test \"$(uname)\" = \"Darwin\"; then\n  bmac=true\n  AC_MSG_RESULT(compile in mac)\nelse\n  AC_MSG_RESULT(no)\nfi\n\nAC_MSG_CHECKING(for Linux)\nif test \"$bmac\" != \"true\" -a \"$bwin32\" != \"true\"; then\n  blinux=true\n  AC_MSG_RESULT(compile in linux)\nelse\n  AC_MSG_RESULT(no)\nfi\n\n# test which sub-component to compile\n\nif test \"$bwin32\" = true; then\n   compile_tools=no\nfi\n\nif test \"$bmac\" = true; then\n   compile_tools=no\nfi\n\nif test \"$blinux\" = true; then\n   compile_tools=yes\nfi\n\nif test \"$bwin32\" != true; then\n   AC_ARG_ENABLE(fuse, AC_HELP_STRING([--enable-fuse], [enable fuse virtual file system]),\n      
[compile_fuse=$enableval],[compile_fuse=\"yes\"])\nfi\n\nAC_ARG_ENABLE(python,\n              AC_HELP_STRING([--enable-python],[build seafile python binding]),\n              [compile_python=$enableval],\n              [compile_python=yes])\n\nAC_ARG_WITH(mysql,\n            AC_HELP_STRING([--with-mysql],[path to mysql_config]),\n            [MYSQL_CONFIG=$with_mysql],\n\t    [MYSQL_CONFIG=\"default_mysql_config\"])\n\nAC_ARG_ENABLE(httpserver, AC_HELP_STRING([--enable-httpserver], [enable httpserver]),\n   [compile_httpserver=$enableval],[compile_httpserver=\"yes\"])\n\nAM_CONDITIONAL([COMPILE_TOOLS], [test \"${compile_tools}\" = \"yes\"])\nAM_CONDITIONAL([COMPILE_PYTHON], [test \"${compile_python}\" = \"yes\"])\nAM_CONDITIONAL([COMPILE_FUSE], [test \"${compile_fuse}\" = \"yes\"])\nAM_CONDITIONAL([WIN32], [test \"$bwin32\" = \"true\"])\nAM_CONDITIONAL([MACOS], [test \"$bmac\" = \"true\"])\nAM_CONDITIONAL([LINUX], [test \"$blinux\" = \"true\"])\n\n\n# check libraries\nif test \"$bwin32\" != true; then\n  if test \"$bmac\" = true; then\n  AC_CHECK_LIB(c, uuid_generate, [echo \"found library uuid\"],\n          AC_MSG_ERROR([*** Unable to find uuid_generate in libc]), )\n  else\n  AC_CHECK_LIB(uuid, uuid_generate, [echo \"found library uuid\"],\n          AC_MSG_ERROR([*** Unable to find uuid library]), )\n  fi\nfi\n\nAC_CHECK_LIB(pthread, pthread_create, [echo \"found library pthread\"], AC_MSG_ERROR([*** Unable to find pthread library]), )\nAC_CHECK_LIB(sqlite3, sqlite3_open,[echo \"found library sqlite3\"] , AC_MSG_ERROR([*** Unable to find sqlite3 library]), )\nAC_CHECK_LIB(crypto, SHA1_Init, [echo \"found library crypto\"], AC_MSG_ERROR([*** Unable to find openssl crypto library]), )\n\ndnl Do we need to use AX_LIB_SQLITE3 to check sqlite?\ndnl AX_LIB_SQLITE3\n\nCONSOLE=\nif test \"$bwin32\" = \"true\"; then\n  AC_ARG_ENABLE(console, AC_HELP_STRING([--enable-console], [enable console]),\n      [console=$enableval],[console=\"yes\"])\n  if test x${console} != 
xyes ; then\n    CONSOLE=\"-Wl,--subsystem,windows -Wl,--entry,_mainCRTStartup\"\n  fi\nfi\nAC_SUBST(CONSOLE)\n\nif test \"$bwin32\" = true; then\n  LIB_WS32=-lws2_32\n  LIB_GDI32=-lgdi32\n  LIB_RT=\n  LIB_INTL=-lintl\n  LIBS=\n  LIB_RESOLV=\n  LIB_UUID=-lRpcrt4\n  LIB_IPHLPAPI=-liphlpapi\n  LIB_SHELL32=-lshell32\n  LIB_PSAPI=-lpsapi\n  LIB_MAC=\n  MSVC_CFLAGS=\"-D__MSVCRT__ -D__MSVCRT_VERSION__=0x0601\"\n  LIB_CRYPT32=-lcrypt32\n  LIB_ICONV=-liconv\nelif test \"$bmac\" = true ; then\n  LIB_WS32=\n  LIB_GDI32=\n  LIB_RT=\n  LIB_INTL=\n  LIB_RESOLV=-lresolv\n  LIB_UUID=\n  LIB_IPHLPAPI=\n  LIB_SHELL32=\n  LIB_PSAPI=\n  MSVC_CFLAGS=\n  LIB_MAC=\"-framework CoreServices\"\n  LIB_CRYPT32=\n  LIB_ICONV=-liconv\nelse\n  LIB_WS32=\n  LIB_GDI32=\n  LIB_RT=\n  LIB_INTL=\n  LIB_RESOLV=-lresolv\n  LIB_UUID=-luuid\n  LIB_IPHLPAPI=\n  LIB_SHELL32=\n  LIB_PSAPI=\n  LIB_MAC=\n  MSVC_CFLAGS=\n  LIB_CRYPT32=\nfi\n\nAC_SUBST(LIB_WS32)\nAC_SUBST(LIB_GDI32)\nAC_SUBST(LIB_RT)\nAC_SUBST(LIB_INTL)\nAC_SUBST(LIB_RESOLV)\nAC_SUBST(LIB_UUID)\nAC_SUBST(LIB_IPHLPAPI)\nAC_SUBST(LIB_SHELL32)\nAC_SUBST(LIB_PSAPI)\nAC_SUBST(LIB_MAC)\nAC_SUBST(MSVC_CFLAGS)\nAC_SUBST(LIB_CRYPT32)\nAC_SUBST(LIB_ICONV)\n\n\nLIBEVENT_REQUIRED=2.0\nGLIB_REQUIRED=2.16.0\nSEARPC_REQUIRED=1.0\nJANSSON_REQUIRED=2.2.1\nZDB_REQUIRED=2.10\n#LIBNAUTILUS_EXTENSION_REQUIRED=2.30.1\nCURL_REQUIRED=7.17\nFUSE_REQUIRED=2.7.3\nZLIB_REQUIRED=1.2.0\nLIHIBREDIS_REQUIRED=0.15.0\n\nPKG_CHECK_MODULES(SSL, [openssl])\nAC_SUBST(SSL_CFLAGS)\nAC_SUBST(SSL_LIBS)\n\nPKG_CHECK_MODULES(GLIB2, [glib-2.0 >= $GLIB_REQUIRED])\nAC_SUBST(GLIB2_CFLAGS)\nAC_SUBST(GLIB2_LIBS)\n\nPKG_CHECK_MODULES(GOBJECT, [gobject-2.0 >= $GLIB_REQUIRED])\nAC_SUBST(GOBJECT_CFLAGS)\nAC_SUBST(GOBJECT_LIBS)\n\nPKG_CHECK_MODULES(SEARPC, [libsearpc >= $SEARPC_REQUIRED])\nAC_SUBST(SEARPC_CFLAGS)\nAC_SUBST(SEARPC_LIBS)\n\nPKG_CHECK_MODULES(JANSSON, [jansson >= $JANSSON_REQUIRED])\nAC_SUBST(JANSSON_CFLAGS)\nAC_SUBST(JANSSON_LIBS)\n\nPKG_CHECK_MODULES(LIBEVENT, [libevent >= 
$LIBEVENT_REQUIRED])\nAC_SUBST(LIBEVENT_CFLAGS)\nAC_SUBST(LIBEVENT_LIBS)\n\nPKG_CHECK_MODULES(ZLIB, [zlib >= $ZLIB_REQUIRED])\nAC_SUBST(ZLIB_CFLAGS)\nAC_SUBST(ZLIB_LIBS)\n\nif test \"x${MYSQL_CONFIG}\" = \"xdefault_mysql_config\"; then\n    PKG_CHECK_MODULES(MYSQL, [mysqlclient], [have_mysql=\"yes\"], [have_mysql=\"no\"])\n    if test \"x${have_mysql}\" = \"xyes\"; then\n        AC_SUBST(MYSQL_CFLAGS)\n        AC_SUBST(MYSQL_LIBS)\n        AC_DEFINE([HAVE_MYSQL], 1, [Define to 1 if MySQL support is enabled])\n    fi\nelse\n    AC_MSG_CHECKING([for MySQL])\n    MYSQL_CFLAGS=`${MYSQL_CONFIG} --include`\n    MYSQL_LIBS=`${MYSQL_CONFIG} --libs`\n    AC_MSG_RESULT([${MYSQL_CFLAGS}])\n    AC_SUBST(MYSQL_CFLAGS)\n    AC_SUBST(MYSQL_LIBS)\n    AC_DEFINE([HAVE_MYSQL], 1, [Define to 1 if MySQL support is enabled])\nfi\n\nif test \"${compile_httpserver}\" = \"yes\"; then\n    AC_DEFINE([HAVE_EVHTP], [1], [Define to 1 if httpserver is enabled.])\n    AC_SUBST(EVHTP_LIBS, \"-levhtp\")\nfi\n\nPKG_CHECK_MODULES(LIBHIREDIS, [hiredis >= $LIHIBREDIS_REQUIRED])\nAC_SUBST(LIBHIREDIS_CFLAGS)\nAC_SUBST(LIBHIREDIS_LIBS)\n\nPKG_CHECK_MODULES(CURL, [libcurl >= $CURL_REQUIRED])\nAC_SUBST(CURL_CFLAGS)\nAC_SUBST(CURL_LIBS)\n\nPKG_CHECK_MODULES(JWT, [libjwt])\nAC_SUBST(JWT_CFLAGS)\nAC_SUBST(JWT_LIBS)\n\nPKG_CHECK_MODULES(ARGON2, [libargon2])\nAC_SUBST(ARGON2_CFLAGS)\nAC_SUBST(ARGON2_LIBS)\n\nif test x${compile_python} = xyes; then\n    AM_PATH_PYTHON([2.6])\n    if test \"$bwin32\" = true; then\n        if test x$PYTHON_DIR != x; then\n            # set pyexecdir to somewhere like /c/Python26/Lib/site-packages\n            pyexecdir=${PYTHON_DIR}/Lib/site-packages\n            pythondir=${pyexecdir}\n            pkgpyexecdir=${pyexecdir}/${PACKAGE}\n            pkgpythondir=${pythondir}/${PACKAGE}\n        fi\n    fi\nfi\n\nif test \"${compile_fuse}\" = \"yes\"; then\n   PKG_CHECK_MODULES(FUSE, [fuse >= $FUSE_REQUIRED])\n   AC_SUBST(FUSE_CFLAGS)\n   AC_SUBST(FUSE_LIBS)\nfi\n\ndnl check 
libarchive\nLIBARCHIVE_REQUIRED=2.8.5\nPKG_CHECK_MODULES(LIBARCHIVE, [libarchive >= $LIBARCHIVE_REQUIRED])\nAC_SUBST(LIBARCHIVE_CFLAGS)\nAC_SUBST(LIBARCHIVE_LIBS)\n\nac_configure_args=\"$ac_configure_args -q\"\n\nAC_CONFIG_FILES(\n    Makefile\n    include/Makefile\n    fuse/Makefile\n    lib/Makefile\n    lib/libseafile.pc\n    common/Makefile\n    common/cdc/Makefile\n    server/Makefile\n    server/gc/Makefile\n    python/Makefile\n    python/seafile/Makefile\n    python/seaserv/Makefile\n    controller/Makefile\n    tools/Makefile\n    doc/Makefile\n    scripts/Makefile\n)\n\nAC_OUTPUT\n"
  },
  {
    "path": "controller/Makefile.am",
    "content": "bin_PROGRAMS = seafile-controller\n\nAM_CFLAGS = \\\n\t-DSEAFILE_SERVER \\\n\t-I$(top_srcdir)/include \\\n\t-I$(top_srcdir)/lib \\\n\t-I$(top_builddir)/lib \\\n\t-I$(top_srcdir)/common \\\n\t@SEARPC_CFLAGS@ \\\n\t@GLIB2_CFLAGS@ \\\n\t-Wall\n\nnoinst_HEADERS = seafile-controller.h ../common/log.h\n\nseafile_controller_SOURCES = seafile-controller.c ../common/log.c\n\nseafile_controller_LDADD = $(top_builddir)/lib/libseafile_common.la \\\n\t@GLIB2_LIBS@  @GOBJECT_LIBS@ @SSL_LIBS@ @LIB_RT@ @LIB_UUID@ @LIBEVENT_LIBS@ \\\n\t@SEARPC_LIBS@ @JANSSON_LIBS@ @ZLIB_LIBS@\n"
  },
  {
    "path": "controller/seafile-controller.c",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#include \"common.h\"\n\n#include <sys/types.h>\n#include <sys/wait.h>\n#include <signal.h>\n#include <unistd.h>\n#include <errno.h>\n#include <string.h>\n#include <getopt.h>\n#include <stdbool.h>\n#include <fcntl.h>\n\n#include <glib.h>\n\n#include \"utils.h\"\n#include \"log.h\"\n#include \"seafile-controller.h\"\n\n#define CHECK_PROCESS_INTERVAL 10        /* every 10 seconds */\n\n#if defined(__sun)\n#define PROC_SELF_PATH \"/proc/self/path/a.out\"\n#else\n#define PROC_SELF_PATH \"/proc/self/exe\"\n#endif\n\nSeafileController *ctl;\n\nstatic char *controller_pidfile = NULL;\n\nchar *bin_dir = NULL;\nchar *installpath = NULL;\nchar *topdir = NULL;\ngboolean enabled_go_fileserver = FALSE;\n\nchar *seafile_ld_library_path = NULL;\n\nstatic const char *short_opts = \"hvftc:d:l:g:G:P:F:\";\nstatic const struct option long_opts[] = {\n    { \"help\", no_argument, NULL, 'h', },\n    { \"version\", no_argument, NULL, 'v', },\n    { \"foreground\", no_argument, NULL, 'f', },\n    { \"test\", no_argument, NULL, 't', },\n    { \"config-dir\", required_argument, NULL, 'c', },\n    { \"seafile-dir\", required_argument, NULL, 'd', },\n    { \"central-config-dir\", required_argument, NULL, 'F' },\n    { \"logdir\", required_argument, NULL, 'l', },\n    { \"ccnet-debug-level\", required_argument, NULL, 'g' },\n    { \"seafile-debug-level\", required_argument, NULL, 'G' },\n    { \"pidfile\", required_argument, NULL, 'P' },\n    { NULL, 0, NULL, 0, },\n};\n\nstatic void controller_exit (int code) __attribute__((noreturn));\n\nstatic int read_seafdav_config();\n\nstatic void\ncontroller_exit (int code)\n{\n    if (code != 0) {\n        seaf_warning (\"seaf-controller exited with code %d\\n\", code);\n    }\n    exit(code);\n}\n\n//\n// Utility functions Start\n//\n\n/* returns the pid of the newly created process */\nstatic int\nspawn_process (char *argv[], bool 
is_python_process)\n{\n    char **ptr = argv;\n    GString *buf = g_string_new(argv[0]);\n    while (*(++ptr)) {\n        g_string_append_printf (buf, \" %s\", *ptr);\n    }\n    seaf_message (\"spawn_process: %s\\n\", buf->str);\n    g_string_free (buf, TRUE);\n\n    int pipefd[2] = {0, 0};\n    if (is_python_process) {\n        if (pipe(pipefd) < 0) {\n            seaf_warning(\"Failed to create pipe.\\n\");\n        }\n        fcntl(pipefd[0], F_SETFL, O_NONBLOCK);\n    }\n\n    pid_t pid = fork();\n\n    if (pid == 0) {\n        if (is_python_process) {\n            if (pipefd[0] > 0 && pipefd[1] > 0) {\n                close(pipefd[0]);\n                dup2(pipefd[1], 2);\n            }\n        }\n        /* child process */\n        execvp (argv[0], argv);\n        seaf_warning (\"failed to execvp %s\\n\", argv[0]);\n        \n        if (pipefd[1] > 0) {\n            close(pipefd[1]);\n        }\n\n        exit(-1);\n    } else {\n        /* controller */\n        if (pid == -1)\n            seaf_warning (\"error when fork %s: %s\\n\", argv[0], strerror(errno));\n        else\n            seaf_message (\"spawned %s, pid %d\\n\", argv[0], pid);\n\n        if (is_python_process) {\n            char child_stderr[1024] = {0};\n            if (pipefd[0] > 0 && pipefd[1] > 0){\n                close(pipefd[1]);\n                sleep(1);\n                while (read(pipefd[0], child_stderr, sizeof(child_stderr)) > 0)\n                    seaf_warning(\"%s\", child_stderr);\n                close(pipefd[0]);\n            }\n        }\n        return (int)pid;\n    }\n}\n\n#define PID_ERROR_ENOENT 0\n#define PID_ERROR_OTHER  -1\n\n/**\n * @return\n * - pid if successfully opened and read the file\n * - PID_ERROR_ENOENT if file not exists,\n * - PID_ERROR_OTHER if other errors\n */\nstatic int\nread_pid_from_pidfile (const char *pidfile)\n{\n    FILE *pf = g_fopen (pidfile, \"r\");\n    if (!pf) {\n        if (errno == ENOENT) {\n            return 
PID_ERROR_ENOENT;\n        } else {\n            return PID_ERROR_OTHER;\n        }\n    }\n\n    int pid = PID_ERROR_OTHER;\n    if (fscanf (pf, \"%d\", &pid) < 0) {\n        seaf_warning (\"bad pidfile format: %s\\n\", pidfile);\n        fclose(pf);\n        return PID_ERROR_OTHER;\n    }\n\n    fclose(pf);\n\n    return pid;\n}\n\nstatic void\nkill_by_force (int which)\n{\n    if (which < 0 || which >= N_PID)\n        return;\n\n    char *pidfile = ctl->pidfile[which];\n    int pid = read_pid_from_pidfile(pidfile);\n    if (pid > 0) {\n        // if SIGKILL send success, then remove related pid file\n        if (kill ((pid_t)pid, SIGKILL) == 0) {\n            g_unlink (pidfile);\n        }\n    }\n}\n\n//\n// Utility functions End\n//\n\nstatic int\nstart_seaf_server ()\n{\n    if (!ctl->config_dir || !ctl->seafile_dir)\n        return -1;\n\n    seaf_message (\"starting seaf-server ...\\n\");\n    static char *logfile = NULL;\n    if (logfile == NULL) {\n        logfile = g_build_filename (ctl->logdir, \"seafile.log\", NULL);\n    }\n\n    char *argv[] = {\n        \"seaf-server\",\n        \"-F\", ctl->central_config_dir,\n        \"-c\", ctl->config_dir,\n        \"-d\", ctl->seafile_dir,\n        \"-l\", logfile,\n        \"-P\", ctl->pidfile[PID_SERVER],\n        \"-p\", ctl->rpc_pipe_path,\n        NULL};\n    int pid = spawn_process (argv, false);\n    if (pid <= 0) {\n        seaf_warning (\"Failed to spawn seaf-server\\n\");\n        return -1;\n    }\n\n    return 0;\n}\n\nstatic int\nstart_go_fileserver()\n{\n    if (!ctl->central_config_dir || !ctl->seafile_dir)\n        return -1;\n\n    static char *logfile = NULL;\n    if (logfile == NULL) {\n        logfile = g_build_filename (ctl->logdir, \"fileserver.log\", NULL);\n    }\n\n    char *argv[] = {\n        \"fileserver\",\n        \"-F\", ctl->central_config_dir,\n        \"-d\", ctl->seafile_dir,\n        \"-l\", logfile,\n        \"-p\", ctl->rpc_pipe_path,\n        \"-P\", 
ctl->pidfile[PID_FILESERVER],\n        NULL};\n\n    seaf_message (\"starting go-fileserver ...\");\n    int pid = spawn_process(argv, false);\n\n    if (pid <= 0) {\n        seaf_warning(\"Failed to spawn fileserver\\n\");\n        return -1;\n    }\n    return 0;\n}\n\nstatic const char *\nget_python_executable() {\n    static const char *python = NULL;\n    if (python != NULL) {\n        return python;\n    }\n\n    static const char *try_list[] = {\n        \"python3\"\n    };\n\n    int i;\n    for (i = 0; i < G_N_ELEMENTS(try_list); i++) {\n        char *binary = g_find_program_in_path (try_list[i]);\n        if (binary != NULL) {\n            python = binary;\n            break;\n        }\n    }\n\n    if (python == NULL) {\n        python = g_getenv (\"PYTHON\");\n        if (python == NULL) {\n            python = \"python\";\n        }\n    }\n\n    return python;\n}\n\nstatic void\ninit_seafile_path ()\n{\n    GError *error = NULL;\n    char *binary = g_file_read_link (PROC_SELF_PATH, &error);\n    char *tmp = NULL;\n    if (error != NULL) {\n        seaf_warning (\"failed to readlink: %s\\n\", error->message);\n        return;\n    }\n\n    bin_dir = g_path_get_dirname (binary);\n\n    tmp = g_path_get_dirname (bin_dir);\n    installpath = g_path_get_dirname (tmp);\n\n    topdir = g_path_get_dirname (installpath);\n\n    g_free (binary);\n    g_free (tmp);\n}\n\nstatic void\nsetup_python_path()\n{\n    static GList *path_list = NULL;\n    if (path_list != NULL) {\n        /* Only setup once */\n        return;\n    }\n\n    /* Allow seafdav to access seahub_settings.py */\n    path_list = g_list_prepend (path_list, g_build_filename (topdir, \"conf\", NULL));\n\n    path_list = g_list_prepend (path_list,\n        g_build_filename (installpath, \"seahub\", NULL));\n\n    path_list = g_list_prepend (path_list,\n        g_build_filename (installpath, \"seahub/thirdpart\", NULL));\n\n    path_list = g_list_prepend (path_list,\n        g_build_filename 
(installpath, \"seahub/seahub-extra\", NULL));\n\n    path_list = g_list_prepend (path_list,\n        g_build_filename (installpath, \"seahub/seahub-extra/thirdparts\", NULL));\n\n    path_list = g_list_prepend (path_list,\n        g_build_filename (installpath, \"seafile/lib/python3/site-packages\", NULL));\n\n    path_list = g_list_prepend (path_list,\n        g_build_filename (installpath, \"seafile/lib64/python3/site-packages\", NULL));\n\n    path_list = g_list_reverse (path_list);\n\n    GList *ptr;\n    GString *new_pypath = g_string_new (g_getenv(\"PYTHONPATH\"));\n\n    for (ptr = path_list; ptr != NULL; ptr = ptr->next) {\n        const char *path = (char *)ptr->data;\n\n        g_string_append_c (new_pypath, ':');\n        g_string_append (new_pypath, path);\n    }\n\n    g_setenv (\"PYTHONPATH\", g_string_free (new_pypath, FALSE), TRUE);\n\n    /* seaf_message (\"PYTHONPATH is:\\n\\n%s\\n\", g_getenv (\"PYTHONPATH\")); */\n}\n\nstatic void\nsetup_env ()\n{\n    g_setenv (\"CCNET_CONF_DIR\", ctl->config_dir, TRUE);\n    g_setenv (\"SEAFILE_CONF_DIR\", ctl->seafile_dir, TRUE);\n    g_setenv (\"SEAFILE_CENTRAL_CONF_DIR\", ctl->central_config_dir, TRUE);\n    g_setenv (\"SEAFILE_RPC_PIPE_PATH\", ctl->rpc_pipe_path, TRUE);\n\n    char *seahub_dir = g_build_filename (installpath, \"seahub\", NULL);\n    char *seafdav_conf = g_build_filename (ctl->central_config_dir, \"seafdav.conf\", NULL);\n    g_setenv (\"SEAHUB_DIR\", seahub_dir, TRUE);\n    g_setenv (\"SEAFDAV_CONF\", seafdav_conf, TRUE);\n\n    setup_python_path();\n}\n\nstatic int\nstart_seafdav() {\n    static char *seafdav_log_file = NULL;\n    if (seafdav_log_file == NULL)\n        seafdav_log_file = g_build_filename (ctl->logdir,\n                                             \"seafdav.log\",\n                                             NULL);\n\n    SeafDavConfig conf = ctl->seafdav_config;\n    char port[16];\n    snprintf (port, sizeof(port), \"%d\", conf.port);\n\n    int pid;\n    if 
(conf.debug_mode) {\n        char *argv[] = {\n            (char *)get_python_executable(),\n            \"-m\", \"wsgidav.server.server_cli\",\n            \"--server\", \"gunicorn\",\n            \"--root\", \"/\",\n            \"--log-file\", seafdav_log_file, \n            \"--pid\", ctl->pidfile[PID_SEAFDAV],\n            \"--port\", port,\n            \"--host\", conf.host,\n            \"-v\",\n            NULL\n        };\n        pid = spawn_process (argv, true);\n    } else {\n        char *argv[] = {\n            (char *)get_python_executable(),\n            \"-m\", \"wsgidav.server.server_cli\",\n            \"--server\", \"gunicorn\",\n            \"--root\", \"/\",\n            \"--log-file\", seafdav_log_file, \n            \"--pid\", ctl->pidfile[PID_SEAFDAV],\n            \"--port\", port,\n            \"--host\", conf.host,\n            NULL\n        };\n        pid = spawn_process (argv, true);\n    }\n\n    if (pid <= 0) {\n        seaf_warning (\"Failed to spawn seafdav\\n\");\n        return -1;\n    }\n\n    return 0;\n}\n\nstatic void\nrun_controller_loop ()\n{\n    GMainLoop *mainloop = g_main_loop_new (NULL, FALSE);\n\n    g_main_loop_run (mainloop);\n}\n\nstatic gboolean\nneed_restart (int which)\n{\n    if (which < 0 || which >= N_PID)\n        return FALSE;\n\n    int pid = read_pid_from_pidfile (ctl->pidfile[which]);\n    if (pid == PID_ERROR_ENOENT) {\n        seaf_warning (\"pid file %s does not exist\\n\", ctl->pidfile[which]);\n        return TRUE;\n    } else if (pid == PID_ERROR_OTHER) {\n        seaf_warning (\"failed to read pidfile %s: %s\\n\", ctl->pidfile[which], strerror(errno));\n        return FALSE;\n    } else {\n        char buf[256];\n        snprintf (buf, sizeof(buf), \"/proc/%d\", pid);\n        if (g_file_test (buf, G_FILE_TEST_IS_DIR)) {\n            return FALSE;\n        } else {\n            seaf_warning (\"path /proc/%d doesn't exist, restart process [%d]\\n\", pid, which);\n            return TRUE;\n        
}\n    }\n}\n\nstatic gboolean\nshould_start_go_fileserver()\n{\n    char *seafile_conf = g_build_filename (ctl->central_config_dir, \"seafile.conf\", NULL);\n    GKeyFile *key_file = g_key_file_new ();\n    gboolean ret = 0;\n\n    if (!g_key_file_load_from_file (key_file, seafile_conf,\n                                    G_KEY_FILE_KEEP_COMMENTS, NULL)) {\n        seaf_warning(\"Failed to load seafile.conf.\\n\");\n        ret = FALSE;\n        goto out;\n    }\n    GError *err = NULL;\n    gboolean enabled;\n    enabled = g_key_file_get_boolean(key_file, \"fileserver\", \"use_go_fileserver\", &err);\n    if (err) {\n        seaf_warning(\"Config [fileserver, use_go_fileserver] not set, default is FALSE.\\n\");\n        ret = FALSE;\n        g_clear_error(&err);\n    } else {\n        if (enabled) {\n            ret = TRUE;\n        } else {\n            ret = FALSE;\n        }\n    }\n\n    if (ret) {\n        char *type = NULL;\n        type = g_key_file_get_string (key_file, \"database\", \"type\", NULL);\n        if (!type || g_strcmp0 (type, \"mysql\") != 0) {\n            seaf_message (\"Use C fileserver because go fileserver does not support sqlite.\");\n            ret = FALSE;\n        }\n        g_free (type);\n    }\n\nout:\n    g_key_file_free (key_file);\n    g_free (seafile_conf);\n\n    return ret;\n}\n\nstatic gboolean\ncheck_process (void *data)\n{\n    if (need_restart(PID_SERVER)) {\n        seaf_message (\"seaf-server need restart...\\n\");\n        start_seaf_server();\n    }\n\n    if (enabled_go_fileserver) {\n        if (need_restart(PID_FILESERVER)) {\n            seaf_message(\"fileserver need restart...\\n\");\n            start_go_fileserver();\n        }\n    }\n\n    if (ctl->seafdav_config.enabled) {\n        if (need_restart(PID_SEAFDAV)) {\n            seaf_message (\"seafdav need restart...\\n\");\n            start_seafdav ();\n        }\n    }\n\n    return TRUE;\n}\n\nstatic void\nstart_process_monitor ()\n{\n    
ctl->check_process_timer = g_timeout_add (\n        CHECK_PROCESS_INTERVAL * 1000, check_process, NULL);\n}\n\nstatic int seaf_controller_start ();\n/* This would also stop seaf-server & other components */\nstatic void\nstop_services ()\n{\n    seaf_message (\"shutting down all services ...\\n\");\n\n    kill_by_force(PID_SERVER);\n    kill_by_force(PID_FILESERVER);\n    kill_by_force(PID_SEAFDAV);\n}\n\nstatic void\ninit_pidfile_path (SeafileController *ctl)\n{\n    char *pid_dir = g_build_filename (topdir, \"pids\", NULL);\n    if (!g_file_test(pid_dir, G_FILE_TEST_EXISTS)) {\n        if (g_mkdir(pid_dir, 0777) < 0) {\n            seaf_warning(\"failed to create pid dir %s: %s\", pid_dir, strerror(errno));\n            controller_exit(1);\n        }\n    }\n\n    ctl->pidfile[PID_SERVER] = g_build_filename (pid_dir, \"seaf-server.pid\", NULL);\n    ctl->pidfile[PID_SEAFDAV] = g_build_filename (pid_dir, \"seafdav.pid\", NULL);\n    ctl->pidfile[PID_FILESERVER] = g_build_filename (pid_dir, \"fileserver.pid\", NULL);\n}\n\nstatic int\nseaf_controller_init (SeafileController *ctl,\n                      char *central_config_dir,\n                      char *config_dir,\n                      char *seafile_dir,\n                      char *logdir)\n{\n    init_seafile_path ();\n    if (!g_file_test (config_dir, G_FILE_TEST_IS_DIR)) {\n        seaf_warning (\"invalid config_dir: %s\\n\", config_dir);\n        return -1;\n    }\n\n    if (!g_file_test (seafile_dir, G_FILE_TEST_IS_DIR)) {\n        seaf_warning (\"invalid seafile_dir: %s\\n\", seafile_dir);\n        return -1;\n    }\n\n    if (logdir == NULL) {\n        char *topdir = g_path_get_dirname(config_dir);\n        logdir = g_build_filename (topdir, \"logs\", NULL);\n        if (checkdir_with_mkdir(logdir) < 0) {\n            seaf_error (\"failed to create log folder \\\"%s\\\": %s\\n\",\n                        logdir, strerror(errno));\n            return -1;\n        }\n        g_free (topdir);\n    }\n\n   
 ctl->central_config_dir = central_config_dir;\n    ctl->config_dir = config_dir;\n    ctl->seafile_dir = seafile_dir;\n    ctl->rpc_pipe_path = g_build_filename (installpath, \"runtime\", NULL);\n    ctl->logdir = logdir;\n\n    if (read_seafdav_config() < 0) {\n        return -1;\n    }\n\n    init_pidfile_path (ctl);\n    setup_env ();\n\n    return 0;\n}\n\nstatic int\nseaf_controller_start ()\n{\n    if (start_seaf_server() < 0) {\n        seaf_warning (\"Failed to start seaf server\\n\");\n        return -1;\n    }\n\n    if (enabled_go_fileserver) {\n        if (start_go_fileserver() < 0) {\n            seaf_warning (\"Failed to start fileserver\\n\");\n            return -1;\n        }\n    }\n\n    start_process_monitor ();\n    return 0;\n}\n\nstatic int\nwrite_controller_pidfile ()\n{\n    if (!controller_pidfile)\n        return -1;\n\n    pid_t pid = getpid();\n\n    FILE *pidfile = g_fopen(controller_pidfile, \"w\");\n    if (!pidfile) {\n        seaf_warning (\"Failed to fopen() pidfile %s: %s\\n\",\n                      controller_pidfile, strerror(errno));\n        return -1;\n    }\n\n    char buf[32];\n    snprintf (buf, sizeof(buf), \"%d\\n\", pid);\n    if (fputs(buf, pidfile) < 0) {\n        seaf_warning (\"Failed to write pidfile %s: %s\\n\",\n                      controller_pidfile, strerror(errno));\n        fclose (pidfile);\n        return -1;\n    }\n\n    fflush (pidfile);\n    fclose (pidfile);\n    return 0;\n}\n\nstatic void\nremove_controller_pidfile ()\n{\n    if (controller_pidfile) {\n        g_unlink (controller_pidfile);\n    }\n}\n\nstatic void\nsigint_handler (int signo)\n{\n    stop_services ();\n\n    remove_controller_pidfile();\n\n    signal (signo, SIG_DFL);\n    raise (signo);\n}\n\nstatic void\nsigchld_handler (int signo)\n{\n    waitpid (-1, NULL, WNOHANG);\n}\n\nstatic void\nsigusr1_handler (int signo)\n{\n    seafile_log_reopen();\n}\n\nstatic void\nset_signal_handlers ()\n{\n    signal (SIGINT, sigint_handler);\n 
   signal (SIGTERM, sigint_handler);\n    signal (SIGCHLD, sigchld_handler);\n    signal (SIGUSR1, sigusr1_handler);\n    signal (SIGPIPE, SIG_IGN);\n}\n\nstatic void\nusage ()\n{\n    fprintf (stderr, \"Usage: seafile-controller OPTIONS\\n\"\n                     \"OPTIONS:\\n\"\n                     \"  -b, --bin-dir           insert a directory in front of the PATH env\\n\"\n                     \"  -c, --config-dir        ccnet config dir\\n\"\n                     \"  -d, --seafile-dir       seafile dir\\n\"\n                     );\n}\n\n/* seafile-controller -t is used to test whether config file is valid */\nstatic void\ntest_config (const char *central_config_dir,\n             const char *ccnet_dir,\n             const char *seafile_dir)\n{\n    char buf[1024];\n    GError *error = NULL;\n    int retcode = 0;\n    char *child_stdout = NULL;\n    char *child_stderr = NULL;\n\n    snprintf (buf,\n          sizeof(buf),\n          \"seaf-server -F \\\"%s\\\" -c \\\"%s\\\" -d \\\"%s\\\" -t -f\",\n          central_config_dir,\n          ccnet_dir,\n          seafile_dir);\n\n    g_spawn_command_line_sync (buf,\n                               &child_stdout,\n                               &child_stderr,\n                               &retcode,\n                               &error);\n\n    if (error != NULL) {\n        seaf_error (\"failed to run \\\"seaf-server -t\\\": %s\\n\",\n                    error->message);\n        exit (1);\n    }\n\n    if (child_stdout) {\n        fputs (child_stdout, stdout);\n    }\n\n    if (child_stderr) {\n        fputs (child_stderr, stdout);\n    }\n\n    if (retcode != 0) {\n        seaf_error (\"failed to run \\\"seaf-server -t\\\" [%d]\\n\", retcode);\n        exit (1);\n    }\n\n    exit(0);\n}\n\nstatic int\nread_seafdav_config()\n{\n    int ret = 0;\n    char *seafdav_conf = NULL;\n    GKeyFile *key_file = NULL;\n    GError *error = NULL;\n\n    seafdav_conf = g_build_filename(ctl->central_config_dir, 
\"seafdav.conf\", NULL);\n    if (!g_file_test(seafdav_conf, G_FILE_TEST_EXISTS)) {\n        goto out;\n    }\n\n    key_file = g_key_file_new ();\n    if (!g_key_file_load_from_file (key_file, seafdav_conf,\n                                    G_KEY_FILE_KEEP_COMMENTS, NULL)) {\n        seaf_warning(\"Failed to load seafdav.conf\\n\");\n        ret = -1;\n        goto out;\n    }\n\n    /* enabled */\n    ctl->seafdav_config.enabled = g_key_file_get_boolean(key_file, \"WEBDAV\", \"enabled\", &error);\n    if (error != NULL) {\n        if (error->code != G_KEY_FILE_ERROR_KEY_NOT_FOUND) {\n            seaf_message (\"Error when reading WEBDAV.enabled, use default value 'false'\\n\");\n        }\n        ctl->seafdav_config.enabled = FALSE;\n        g_clear_error (&error);\n        goto out;\n    }\n\n    if (!ctl->seafdav_config.enabled) {\n        goto out;\n    }\n\n    /* host */\n    char *host = seaf_key_file_get_string (key_file, \"WEBDAV\", \"host\", &error);\n    if (error != NULL) {\n        g_clear_error(&error);\n        ctl->seafdav_config.host = g_strdup(\"0.0.0.0\");\n    } else {\n        ctl->seafdav_config.host = host;\n    }\n\n    /* port */\n    ctl->seafdav_config.port = g_key_file_get_integer(key_file, \"WEBDAV\", \"port\", &error);\n    if (error != NULL) {\n        if (error->code != G_KEY_FILE_ERROR_KEY_NOT_FOUND) {\n            seaf_message (\"Error when reading WEBDAV.port, use deafult value 8080\\n\");\n        }\n        ctl->seafdav_config.port = 8080;\n        g_clear_error (&error);\n    }\n\n    ctl->seafdav_config.debug_mode = g_key_file_get_boolean (key_file, \"WEBDAV\", \"debug\", &error);\n    if (error != NULL) {\n        if (error->code != G_KEY_FILE_ERROR_KEY_NOT_FOUND) {\n            seaf_message (\"Error when reading WEBDAV.debug, use deafult value FALSE\\n\");\n        }\n        ctl->seafdav_config.debug_mode = FALSE;\n        g_clear_error (&error);\n    }\n\n    if (ctl->seafdav_config.port <= 0 || 
ctl->seafdav_config.port > 65535) {\n        seaf_warning(\"Failed to load seafdav config: invalid port %d\\n\", ctl->seafdav_config.port);\n        ret = -1;\n        goto out;\n    }\n\nout:\n    if (key_file) {\n        g_key_file_free (key_file);\n    }\n    g_free (seafdav_conf);\n\n    return ret;\n}\n\nstatic int\ninit_syslog_config ()\n{\n    char *seafile_conf = g_build_filename (ctl->central_config_dir, \"seafile.conf\", NULL);\n    GKeyFile *key_file = g_key_file_new ();\n    int ret = 0;\n\n    if (!g_key_file_load_from_file (key_file, seafile_conf,\n                                    G_KEY_FILE_KEEP_COMMENTS, NULL)) {\n        seaf_warning(\"Failed to load seafile.conf.\\n\");\n        ret = -1;\n        goto out;\n    }\n\n    set_syslog_config (key_file);\n\nout:\n    g_key_file_free (key_file);\n    g_free (seafile_conf);\n\n    return ret;\n}\n\n\nint main (int argc, char **argv)\n{\n    if (argc <= 1) {\n        usage ();\n        exit (1);\n    }\n\n    char *config_dir = DEFAULT_CONFIG_DIR;\n    char *central_config_dir = NULL;\n    char *seafile_dir = NULL;\n    char *logdir = NULL;\n    char *ccnet_debug_level_str = \"info\";\n    char *seafile_debug_level_str = \"debug\";\n    int daemon_mode = 1;\n    gboolean test_conf = FALSE;\n\n    int c;\n    while ((c = getopt_long (argc, argv, short_opts,\n                             long_opts, NULL)) != EOF)\n    {\n        switch (c) {\n        case 'h':\n            usage ();\n            exit(1);\n            break;\n        case 'v':\n            fprintf (stderr, \"seafile-controller version 1.0\\n\");\n            exit(1);\n            break;\n        case 't':\n            test_conf = TRUE;\n            break;\n        case 'c':\n            config_dir = optarg;\n            break;\n        case 'F':\n            central_config_dir = g_strdup(optarg);\n            break;\n        case 'd':\n            seafile_dir = g_strdup(optarg);\n            break;\n        case 'f':\n            
daemon_mode = 0;\n            break;\n        case 'L':\n            logdir = g_strdup(optarg);\n            break;\n        case 'g':\n            ccnet_debug_level_str = optarg;\n            break;\n        case 'G':\n            seafile_debug_level_str = optarg;\n            break;\n        case 'P':\n            controller_pidfile = optarg;\n            break;\n        default:\n            usage ();\n            exit (1);\n        }\n    }\n\n#if !GLIB_CHECK_VERSION(2, 35, 0)\n    g_type_init();\n#endif\n#if !GLIB_CHECK_VERSION(2,32,0)\n    g_thread_init (NULL);\n#endif\n\n    if (!seafile_dir) {\n        fprintf (stderr, \"<seafile_dir> must be specified with --seafile-dir\\n\");\n        exit(1);\n    }\n\n    if (!central_config_dir) {\n        fprintf (stderr, \"<central_config_dir> must be specified with --central-config-dir\\n\");\n        exit(1);\n    }\n\n    central_config_dir = ccnet_expand_path (central_config_dir);\n    config_dir = ccnet_expand_path (config_dir);\n    seafile_dir = ccnet_expand_path (seafile_dir);\n\n    if (test_conf) {\n        test_config (central_config_dir, config_dir, seafile_dir);\n    }\n\n    ctl = g_new0 (SeafileController, 1);\n    if (seaf_controller_init (ctl, central_config_dir, config_dir, seafile_dir, logdir) < 0) {\n        controller_exit(1);\n    }\n\n    char *logfile = g_build_filename (ctl->logdir, \"controller.log\", NULL);\n    if (seafile_log_init (logfile, ccnet_debug_level_str,\n                          seafile_debug_level_str, \"seafile-controller\") < 0) {\n        fprintf (stderr, \"Failed to init log.\\n\");\n        controller_exit (1);\n    }\n\n    if (init_syslog_config () < 0) {\n        controller_exit (1);\n    }\n\n    set_signal_handlers ();\n\n    enabled_go_fileserver = should_start_go_fileserver();\n\n    if (seaf_controller_start () < 0)\n        controller_exit (1);\n\n    const char *log_to_stdout_env = g_getenv(\"SEAFILE_LOG_TO_STDOUT\");\n    if (g_strcmp0(log_to_stdout_env, 
\"true\") == 0) {\n        daemon_mode = 0;\n    }\n\n#ifndef WIN32\n    if (daemon_mode) {\n#ifndef __APPLE__\n        daemon (1, 0);\n#else   /* __APPLE */\n        /* daemon is deprecated under APPLE\n         * use fork() instead\n         * */\n        switch (fork ()) {\n          case -1:\n              seaf_warning (\"Failed to daemonize\");\n              exit (-1);\n              break;\n          case 0:\n              /* all good*/\n              break;\n          default:\n              /* kill origin process */\n              exit (0);\n        }\n#endif  /* __APPLE */\n    }\n#endif /* !WIN32 */\n\n    if (controller_pidfile == NULL) {\n        controller_pidfile = g_strdup(g_getenv (\"SEAFILE_PIDFILE\"));\n    }\n\n    if (controller_pidfile != NULL) {\n        if (write_controller_pidfile () < 0) {\n            seaf_warning (\"Failed to write pidfile %s\\n\", controller_pidfile);\n            return -1;\n        }\n    }\n\n    run_controller_loop ();\n\n    return 0;\n}\n\n"
  },
  {
    "path": "controller/seafile-controller.h",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n/*\n * Seafile-controller is responsible for:\n *\n *    1. Start: start server processes:\n *\n *       - ccnet-server\n *       - seaf-server\n *       - seaf-mon\n *\n *    2. Repair:\n *\n *       - ensure ccnet process availability by watching client->connfd\n *       - ensure server processes availablity by checking process is running periodically\n *         If some process has stopped working, try to restart it.\n *\n */\n\n#ifndef SEAFILE_CONTROLLER_H\n#define SEAFILE_CONTROLLER_H\n\ntypedef struct _SeafileController SeafileController;\n\nenum {\n    PID_CCNET = 0,\n    PID_SERVER,\n    PID_FILESERVER,\n    PID_SEAFDAV,\n    PID_SEAFEVENTS,\n    N_PID\n};\n\ntypedef struct SeafDavConfig {\n    gboolean enabled;\n    int port;\n    char *host;\n    gboolean debug_mode;\n\n} SeafDavConfig;\n\nstruct _SeafileController {\n    char *central_config_dir;\n    char *config_dir;\n    char *seafile_dir;\n    char *rpc_pipe_path;\n    char *logdir;\n\n    guint               check_process_timer;\n    guint               client_io_id;\n    /* Decide whether to start seaf-server in cloud mode  */\n    gboolean            cloud_mode;\n\n    int                 pid[N_PID];\n    char                *pidfile[N_PID];\n\n    SeafDavConfig       seafdav_config;\n\n    gboolean            has_seafevents;\n};\n#endif\n"
  },
  {
    "path": "doc/Makefile.am",
    "content": "EXTRA_DIST = seafile-tutorial.doc\n"
  },
  {
    "path": "fileserver/.golangci.yml",
    "content": "run:\n  timeout: 2m\n\nlinters:\n  enable:\n   - govet\n   - gosimple\n   - ineffassign\n   - staticcheck\n   - unused\n   - gofmt\n  disable:\n   - errcheck\n"
  },
  {
    "path": "fileserver/blockmgr/blockmgr.go",
    "content": "// Package blockmgr provides operations on blocks\npackage blockmgr\n\nimport (\n\t\"github.com/haiwen/seafile-server/fileserver/objstore\"\n\t\"io\"\n)\n\nvar store *objstore.ObjectStore\n\n// Init initializes block manager and creates underlying object store.\nfunc Init(seafileConfPath string, seafileDataDir string) {\n\tstore = objstore.New(seafileConfPath, seafileDataDir, \"blocks\")\n}\n\n// Read reads block from storage backend.\nfunc Read(repoID string, blockID string, w io.Writer) error {\n\terr := store.Read(repoID, blockID, w)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// Write writes block to storage backend.\nfunc Write(repoID string, blockID string, r io.Reader) error {\n\terr := store.Write(repoID, blockID, r, false)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// Exists checks block if exists.\nfunc Exists(repoID string, blockID string) bool {\n\tret, _ := store.Exists(repoID, blockID)\n\treturn ret\n}\n\n// Stat calculates block size.\nfunc Stat(repoID string, blockID string) (int64, error) {\n\tret, err := store.Stat(repoID, blockID)\n\treturn ret, err\n}\n"
  },
  {
    "path": "fileserver/blockmgr/blockmgr_test.go",
    "content": "package blockmgr\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"testing\"\n)\n\nconst (\n\tblockID         = \"0401fc662e3bc87a41f299a907c056aaf8322a27\"\n\trepoID          = \"b1f2ad61-9164-418a-a47f-ab805dbd5694\"\n\tseafileConfPath = \"/tmp/conf\"\n\tseafileDataDir  = \"/tmp/conf/seafile-data\"\n\ttestFile        = \"output.data\"\n)\n\nfunc delFile() error {\n\terr := os.Remove(testFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = os.RemoveAll(seafileConfPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc createFile() error {\n\toutputFile, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer outputFile.Close()\n\n\toutputString := \"hello world!\\n\"\n\tfor i := 0; i < 10; i++ {\n\t\toutputFile.WriteString(outputString)\n\t}\n\n\treturn nil\n}\n\nfunc TestMain(m *testing.M) {\n\terr := createFile()\n\tif err != nil {\n\t\tfmt.Printf(\"Failed to create test file : %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tcode := m.Run()\n\terr = delFile()\n\tif err != nil {\n\t\tfmt.Printf(\"Failed to remove test file : %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tos.Exit(code)\n}\n\nfunc testBlockRead(t *testing.T) {\n\tvar buf bytes.Buffer\n\terr := Read(repoID, blockID, &buf)\n\tif err != nil {\n\t\tt.Errorf(\"Failed to read block.\\n\")\n\t}\n}\n\nfunc testBlockWrite(t *testing.T) {\n\tinputFile, err := os.Open(testFile)\n\tif err != nil {\n\t\tt.Errorf(\"Failed to open test file : %v\\n\", err)\n\t}\n\tdefer inputFile.Close()\n\n\terr = Write(repoID, blockID, inputFile)\n\tif err != nil {\n\t\tt.Errorf(\"Failed to write block.\\n\")\n\t}\n}\n\nfunc testBlockExists(t *testing.T) {\n\tret := Exists(repoID, blockID)\n\tif !ret {\n\t\tt.Errorf(\"Block is not exist\\n\")\n\t}\n\n\tfilePath := path.Join(seafileDataDir, \"storage\", \"blocks\", repoID, blockID[:2], blockID[2:])\n\tfileInfo, _ := os.Stat(filePath)\n\tif fileInfo.Size() != 130 {\n\t\tt.Errorf(\"Block is exist, 
but the size of file is incorrect.\\n\")\n\t}\n\n}\n\nfunc TestBlock(t *testing.T) {\n\tInit(seafileConfPath, seafileDataDir)\n\ttestBlockWrite(t)\n\ttestBlockRead(t)\n\ttestBlockExists(t)\n}\n"
  },
  {
    "path": "fileserver/commitmgr/commitmgr.go",
    "content": "// Package commitmgr manages commit objects.\npackage commitmgr\n\nimport (\n\t\"bytes\"\n\t\"crypto/sha1\"\n\t\"encoding/binary\"\n\t\"encoding/hex\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"time\"\n\n\t\"github.com/haiwen/seafile-server/fileserver/objstore\"\n\t\"github.com/haiwen/seafile-server/fileserver/utils\"\n)\n\n// Commit is a commit object\ntype Commit struct {\n\tCommitID       string `json:\"commit_id\"`\n\tRepoID         string `json:\"repo_id\"`\n\tRootID         string `json:\"root_id\"`\n\tCreatorName    string `json:\"creator_name,omitempty\"`\n\tCreatorID      string `json:\"creator\"`\n\tDesc           string `json:\"description\"`\n\tCtime          int64  `json:\"ctime\"`\n\tParentID       String `json:\"parent_id\"`\n\tSecondParentID String `json:\"second_parent_id\"`\n\tRepoName       string `json:\"repo_name\"`\n\tRepoDesc       string `json:\"repo_desc\"`\n\tRepoCategory   string `json:\"repo_category\"`\n\tDeviceName     string `json:\"device_name,omitempty\"`\n\tClientVersion  string `json:\"client_version,omitempty\"`\n\tEncrypted      string `json:\"encrypted,omitempty\"`\n\tEncVersion     int    `json:\"enc_version,omitempty\"`\n\tMagic          string `json:\"magic,omitempty\"`\n\tRandomKey      string `json:\"key,omitempty\"`\n\tSalt           string `json:\"salt,omitempty\"`\n\tPwdHash        string `json:\"pwd_hash,omitempty\"`\n\tPwdHashAlgo    string `json:\"pwd_hash_algo,omitempty\"`\n\tPwdHashParams  string `json:\"pwd_hash_params,omitempty\"`\n\tVersion        int    `json:\"version,omitempty\"`\n\tConflict       int    `json:\"conflict,omitempty\"`\n\tNewMerge       int    `json:\"new_merge,omitempty\"`\n\tRepaired       int    `json:\"repaired,omitempty\"`\n}\n\nvar store *objstore.ObjectStore\n\n// Init initializes commit manager and creates underlying object store.\nfunc Init(seafileConfPath string, seafileDataDir string) {\n\tstore = objstore.New(seafileConfPath, seafileDataDir, \"commits\")\n}\n\n// 
NewCommit initializes a Commit object.\nfunc NewCommit(repoID, parentID, newRoot, user, desc string) *Commit {\n\tcommit := new(Commit)\n\tcommit.RepoID = repoID\n\tcommit.RootID = newRoot\n\tcommit.Desc = desc\n\tcommit.CreatorName = user\n\tcommit.CreatorID = \"0000000000000000000000000000000000000000\"\n\tcommit.Ctime = time.Now().Unix()\n\tcommit.CommitID = computeCommitID(commit)\n\tif parentID != \"\" {\n\t\tcommit.ParentID.SetValid(parentID)\n\t}\n\n\treturn commit\n}\n\nfunc computeCommitID(commit *Commit) string {\n\thash := sha1.New()\n\thash.Write([]byte(commit.RootID))\n\thash.Write([]byte(commit.CreatorID))\n\thash.Write([]byte(commit.CreatorName))\n\thash.Write([]byte(commit.Desc))\n\ttmpBuf := make([]byte, 8)\n\tbinary.BigEndian.PutUint64(tmpBuf, uint64(commit.Ctime))\n\thash.Write(tmpBuf)\n\n\tcheckSum := hash.Sum(nil)\n\tid := hex.EncodeToString(checkSum[:])\n\n\treturn id\n}\n\n// FromData reads from p and converts JSON-encoded data to commit.\nfunc (commit *Commit) FromData(p []byte) error {\n\terr := json.Unmarshal(p, commit)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !utils.IsValidUUID(commit.RepoID) {\n\t\treturn fmt.Errorf(\"repo id %s is invalid\", commit.RepoID)\n\t}\n\tif !utils.IsObjectIDValid(commit.RootID) {\n\t\treturn fmt.Errorf(\"root id %s is invalid\", commit.RootID)\n\t}\n\tif len(commit.CreatorID) != 40 {\n\t\treturn fmt.Errorf(\"creator id %s is invalid\", commit.CreatorID)\n\t}\n\tif commit.ParentID.Valid && !utils.IsObjectIDValid(commit.ParentID.String) {\n\t\treturn fmt.Errorf(\"parent id %s is invalid\", commit.ParentID.String)\n\t}\n\tif commit.SecondParentID.Valid && !utils.IsObjectIDValid(commit.SecondParentID.String) {\n\t\treturn fmt.Errorf(\"second parent id %s is invalid\", commit.SecondParentID.String)\n\t}\n\n\treturn nil\n}\n\n// ToData converts commit to JSON-encoded data and writes to w.\nfunc (commit *Commit) ToData(w io.Writer) error {\n\tjsonstr, err := json.Marshal(commit)\n\tif err != nil {\n\t\treturn 
err\n\t}\n\n\t_, err = w.Write(jsonstr)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// ReadRaw reads data in binary format from storage backend.\nfunc ReadRaw(repoID string, commitID string, w io.Writer) error {\n\terr := store.Read(repoID, commitID, w)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// WriteRaw writes data in binary format to storage backend.\nfunc WriteRaw(repoID string, commitID string, r io.Reader) error {\n\terr := store.Write(repoID, commitID, r, false)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// Load commit from storage backend.\nfunc Load(repoID string, commitID string) (*Commit, error) {\n\tvar buf bytes.Buffer\n\tcommit := new(Commit)\n\terr := ReadRaw(repoID, commitID, &buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = commit.FromData(buf.Bytes())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn commit, nil\n}\n\n// Save commit to storage backend.\nfunc Save(commit *Commit) error {\n\tvar buf bytes.Buffer\n\terr := commit.ToData(&buf)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = WriteRaw(commit.RepoID, commit.CommitID, &buf)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn err\n}\n\n// Exists checks commit if exists.\nfunc Exists(repoID string, commitID string) (bool, error) {\n\treturn store.Exists(repoID, commitID)\n}\n"
  },
  {
    "path": "fileserver/commitmgr/commitmgr_test.go",
    "content": "package commitmgr\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n)\n\nconst (\n\tcommitID        = \"0401fc662e3bc87a41f299a907c056aaf8322a27\"\n\trootID          = \"6a1608dc2a1248838464e9b194800d35252e2ce3\"\n\trepoID          = \"b1f2ad61-9164-418a-a47f-ab805dbd5694\"\n\tseafileConfPath = \"/tmp/conf\"\n\tseafileDataDir  = \"/tmp/conf/seafile-data\"\n)\n\nfunc delFile() error {\n\terr := os.RemoveAll(seafileConfPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc TestMain(m *testing.M) {\n\tcode := m.Run()\n\terr := delFile()\n\tif err != nil {\n\t\tfmt.Printf(\"Failed to remove test file : %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tos.Exit(code)\n}\n\nfunc assertEqual(t *testing.T, a, b interface{}) {\n\tif a != b {\n\t\tt.Errorf(\"Not Equal.%t,%t\", a, b)\n\t}\n}\n\nfunc TestCommit(t *testing.T) {\n\tInit(seafileConfPath, seafileDataDir)\n\tnewCommit := new(Commit)\n\tnewCommit.CommitID = commitID\n\tnewCommit.RepoID = repoID\n\tnewCommit.RootID = rootID\n\tnewCommit.CreatorName = \"seafile\"\n\tnewCommit.CreatorID = commitID\n\tnewCommit.Desc = \"This is a commit\"\n\tnewCommit.Ctime = time.Now().Unix()\n\tnewCommit.ParentID.SetValid(commitID)\n\tnewCommit.DeviceName = \"Linux\"\n\terr := Save(newCommit)\n\tif err != nil {\n\t\tt.Errorf(\"Failed to save commit.\\n\")\n\t}\n\n\tcommit, err := Load(repoID, commitID)\n\tif err != nil {\n\t\tt.Errorf(\"Failed to load commit: %v.\\n\", err)\n\t}\n\tassertEqual(t, commit.CommitID, commitID)\n\tassertEqual(t, commit.RepoID, repoID)\n\tassertEqual(t, commit.CreatorName, \"seafile\")\n\tassertEqual(t, commit.CreatorID, commitID)\n\tassertEqual(t, commit.ParentID.String, commitID)\n}\n"
  },
  {
    "path": "fileserver/commitmgr/null.go",
    "content": "package commitmgr\n\nimport (\n\t\"bytes\"\n\t\"database/sql\"\n\t\"encoding/json\"\n\t\"fmt\"\n)\n\n// nullBytes is a JSON null literal\nvar nullBytes = []byte(\"null\")\n\n// String is a nullable string. It supports SQL and JSON serialization.\n// It will marshal to null if null. Blank string input will be considered null.\ntype String struct {\n\tsql.NullString\n}\n\n// StringFrom creates a new String that will never be blank.\nfunc StringFrom(s string) String {\n\treturn NewString(s, true)\n}\n\n// StringFromPtr creates a new String that be null if s is nil.\nfunc StringFromPtr(s *string) String {\n\tif s == nil {\n\t\treturn NewString(\"\", false)\n\t}\n\treturn NewString(*s, true)\n}\n\n// ValueOrZero returns the inner value if valid, otherwise zero.\nfunc (s String) ValueOrZero() string {\n\tif !s.Valid {\n\t\treturn \"\"\n\t}\n\treturn s.String\n}\n\n// NewString creates a new String\nfunc NewString(s string, valid bool) String {\n\treturn String{\n\t\tNullString: sql.NullString{\n\t\t\tString: s,\n\t\t\tValid:  valid,\n\t\t},\n\t}\n}\n\n// UnmarshalJSON implements json.Unmarshaler.\n// It supports string and null input. 
Blank string input does not produce a null String.\nfunc (s *String) UnmarshalJSON(data []byte) error {\n\tif bytes.Equal(data, nullBytes) {\n\t\ts.Valid = false\n\t\treturn nil\n\t}\n\n\tif err := json.Unmarshal(data, &s.String); err != nil {\n\t\treturn fmt.Errorf(\"null: couldn't unmarshal JSON: %w\", err)\n\t}\n\n\ts.Valid = true\n\treturn nil\n}\n\n// MarshalJSON implements json.Marshaler.\n// It will encode null if this String is null.\nfunc (s String) MarshalJSON() ([]byte, error) {\n\tif !s.Valid {\n\t\treturn []byte(\"null\"), nil\n\t}\n\treturn json.Marshal(s.String)\n}\n\n// MarshalText implements encoding.TextMarshaler.\n// It will encode a blank string when this String is null.\nfunc (s String) MarshalText() ([]byte, error) {\n\tif !s.Valid {\n\t\treturn []byte{}, nil\n\t}\n\treturn []byte(s.String), nil\n}\n\n// UnmarshalText implements encoding.TextUnmarshaler.\n// It will unmarshal to a null String if the input is a blank string.\nfunc (s *String) UnmarshalText(text []byte) error {\n\ts.String = string(text)\n\ts.Valid = s.String != \"\"\n\treturn nil\n}\n\n// SetValid changes this String's value and also sets it to be non-null.\nfunc (s *String) SetValid(v string) {\n\ts.String = v\n\ts.Valid = true\n}\n\n// Ptr returns a pointer to this String's value, or a nil pointer if this String is null.\nfunc (s String) Ptr() *string {\n\tif !s.Valid {\n\t\treturn nil\n\t}\n\treturn &s.String\n}\n\n// IsZero returns true for null strings, for potential future omitempty support.\nfunc (s String) IsZero() bool {\n\treturn !s.Valid\n}\n\n// Equal returns true if both strings have the same value or are both null.\nfunc (s String) Equal(other String) bool {\n\treturn s.Valid == other.Valid && (!s.Valid || s.String == other.String)\n}\n"
  },
  {
    "path": "fileserver/crypt.go",
    "content": "package main\n\nimport (\n\t\"bytes\"\n\t\"crypto/aes\"\n\t\"crypto/cipher\"\n)\n\ntype seafileCrypt struct {\n\tkey     []byte\n\tiv      []byte\n\tversion int\n}\n\nfunc (crypt *seafileCrypt) encrypt(input []byte) ([]byte, error) {\n\tkey := crypt.key\n\tif crypt.version == 3 {\n\t\tkey = to16Bytes(key)\n\t}\n\tblock, err := aes.NewCipher(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsize := block.BlockSize()\n\tinput = pkcs7Padding(input, size)\n\tout := make([]byte, len(input))\n\n\tif crypt.version == 3 {\n\t\tfor bs, be := 0, size; bs < len(input); bs, be = bs+size, be+size {\n\t\t\tblock.Encrypt(out[bs:be], input[bs:be])\n\t\t}\n\t\treturn out, nil\n\t}\n\n\tblockMode := cipher.NewCBCEncrypter(block, crypt.iv)\n\tblockMode.CryptBlocks(out, input)\n\n\treturn out, nil\n}\n\nfunc (crypt *seafileCrypt) decrypt(input []byte) ([]byte, error) {\n\tkey := crypt.key\n\tif crypt.version == 3 {\n\t\tkey = to16Bytes(key)\n\t}\n\tblock, err := aes.NewCipher(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tout := make([]byte, len(input))\n\tsize := block.BlockSize()\n\n\tif crypt.version == 3 {\n\t\t// Encryption repo v3 uses AES_128_ecb mode to encrypt and decrypt, each block is encrypted and decrypted independently,\n\t\t// there is no relationship before and after, and iv is not required.\n\t\tfor bs, be := 0, size; bs < len(input); bs, be = bs+size, be+size {\n\t\t\tblock.Decrypt(out[bs:be], input[bs:be])\n\t\t}\n\t\tout = pkcs7UnPadding(out)\n\t\treturn out, nil\n\t}\n\n\tblockMode := cipher.NewCBCDecrypter(block, crypt.iv)\n\tblockMode.CryptBlocks(out, input)\n\tout = pkcs7UnPadding(out)\n\n\treturn out, nil\n}\n\nfunc pkcs7Padding(p []byte, blockSize int) []byte {\n\tpadding := blockSize - len(p)%blockSize\n\tpadtext := bytes.Repeat([]byte{byte(padding)}, padding)\n\treturn append(p, padtext...)\n}\n\nfunc pkcs7UnPadding(p []byte) []byte {\n\tlength := len(p)\n\tpaddLen := int(p[length-1])\n\treturn p[:(length - paddLen)]\n}\n\nfunc 
to16Bytes(input []byte) []byte {\n\tout := make([]byte, 16)\n\tcopy(out, input)\n\n\treturn out\n}\n"
  },
  {
    "path": "fileserver/diff/diff.go",
    "content": "package diff\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"path/filepath\"\n\t\"strings\"\n\n\t\"github.com/haiwen/seafile-server/fileserver/commitmgr\"\n\t\"github.com/haiwen/seafile-server/fileserver/fsmgr\"\n\t\"github.com/haiwen/seafile-server/fileserver/repomgr\"\n)\n\n// Empty value of sha1\nconst (\n\tEmptySha1 = \"0000000000000000000000000000000000000000\"\n)\n\ntype fileCB func(context.Context, string, []*fsmgr.SeafDirent, interface{}) error\ntype dirCB func(context.Context, string, []*fsmgr.SeafDirent, interface{}, *bool) error\n\ntype DiffOptions struct {\n\tFileCB fileCB\n\tDirCB  dirCB\n\tRepoID string\n\tCtx    context.Context\n\tData   interface{}\n\tReader io.ReadCloser\n}\n\ntype diffData struct {\n\tfoldDirDiff bool\n\tresults     *[]*DiffEntry\n}\n\nfunc DiffTrees(roots []string, opt *DiffOptions) error {\n\treader := fsmgr.GetOneZlibReader()\n\tdefer fsmgr.ReturnOneZlibReader(reader)\n\topt.Reader = reader\n\n\tn := len(roots)\n\tif n != 2 && n != 3 {\n\t\terr := fmt.Errorf(\"the number of commit trees is illegal\")\n\t\treturn err\n\t}\n\ttrees := make([]*fsmgr.SeafDir, n)\n\tfor i := 0; i < n; i++ {\n\t\troot, err := fsmgr.GetSeafdirWithZlibReader(opt.RepoID, roots[i], opt.Reader)\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"Failed to find dir %s:%s\", opt.RepoID, roots[i])\n\t\t\treturn err\n\t\t}\n\t\ttrees[i] = root\n\t}\n\n\treturn diffTreesRecursive(trees, \"\", opt)\n}\n\nfunc diffTreesRecursive(trees []*fsmgr.SeafDir, baseDir string, opt *DiffOptions) error {\n\tn := len(trees)\n\tptrs := make([][]*fsmgr.SeafDirent, 3)\n\n\tfor i := 0; i < n; i++ {\n\t\tif trees[i] != nil {\n\t\t\tptrs[i] = trees[i].Entries\n\t\t} else {\n\t\t\tptrs[i] = nil\n\t\t}\n\t}\n\n\tvar firstName string\n\tvar done bool\n\tvar offset = make([]int, n)\n\tfor {\n\t\tdents := make([]*fsmgr.SeafDirent, 3)\n\t\tfirstName = \"\"\n\t\tdone = true\n\t\tfor i := 0; i < n; i++ {\n\t\t\tif len(ptrs[i]) > offset[i] {\n\t\t\t\tdone = 
false\n\t\t\t\tdent := ptrs[i][offset[i]]\n\n\t\t\t\tif firstName == \"\" {\n\t\t\t\t\tfirstName = dent.Name\n\t\t\t\t} else if strings.Compare(dent.Name, firstName) > 0 {\n\t\t\t\t\tfirstName = dent.Name\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\t\tif done {\n\t\t\tbreak\n\t\t}\n\n\t\tfor i := 0; i < n; i++ {\n\t\t\tif len(ptrs[i]) > offset[i] {\n\t\t\t\tdent := ptrs[i][offset[i]]\n\t\t\t\tif firstName == dent.Name {\n\t\t\t\t\tdents[i] = dent\n\t\t\t\t\toffset[i]++\n\t\t\t\t}\n\n\t\t\t}\n\t\t}\n\n\t\tif n == 2 && dents[0] != nil && dents[1] != nil &&\n\t\t\tdirentSame(dents[0], dents[1]) {\n\t\t\tcontinue\n\t\t}\n\t\tif n == 3 && dents[0] != nil && dents[1] != nil &&\n\t\t\tdents[2] != nil && direntSame(dents[0], dents[1]) &&\n\t\t\tdirentSame(dents[0], dents[2]) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := diffFiles(baseDir, dents, opt); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := diffDirectories(baseDir, dents, opt); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc diffFiles(baseDir string, dents []*fsmgr.SeafDirent, opt *DiffOptions) error {\n\tn := len(dents)\n\tvar nFiles int\n\tfiles := make([]*fsmgr.SeafDirent, 3)\n\tfor i := 0; i < n; i++ {\n\t\tif dents[i] != nil && fsmgr.IsRegular(dents[i].Mode) {\n\t\t\tfiles[i] = dents[i]\n\t\t\tnFiles++\n\t\t}\n\t}\n\n\tif nFiles == 0 {\n\t\treturn nil\n\t}\n\n\treturn opt.FileCB(opt.Ctx, baseDir, files, opt.Data)\n}\n\nfunc diffDirectories(baseDir string, dents []*fsmgr.SeafDirent, opt *DiffOptions) error {\n\tn := len(dents)\n\tdirs := make([]*fsmgr.SeafDirent, 3)\n\tsubDirs := make([]*fsmgr.SeafDir, 3)\n\tvar nDirs int\n\tfor i := 0; i < n; i++ {\n\t\tif dents[i] != nil && fsmgr.IsDir(dents[i].Mode) {\n\t\t\tdirs[i] = dents[i]\n\t\t\tnDirs++\n\t\t}\n\t}\n\tif nDirs == 0 {\n\t\treturn nil\n\t}\n\n\trecurse := true\n\terr := opt.DirCB(opt.Ctx, baseDir, dirs, opt.Data, &recurse)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to call dir callback: %w\", err)\n\t\treturn err\n\t}\n\n\tif !recurse 
{\n\t\treturn nil\n\t}\n\n\tvar dirName string\n\tfor i := 0; i < n; i++ {\n\t\tif dents[i] != nil && fsmgr.IsDir(dents[i].Mode) {\n\t\t\tdir, err := fsmgr.GetSeafdirWithZlibReader(opt.RepoID, dents[i].ID, opt.Reader)\n\t\t\tif err != nil {\n\t\t\t\terr := fmt.Errorf(\"Failed to find dir %s:%s\", opt.RepoID, dents[i].ID)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tsubDirs[i] = dir\n\t\t\tdirName = dents[i].Name\n\t\t}\n\t}\n\n\tnewBaseDir := baseDir + dirName + \"/\"\n\treturn diffTreesRecursive(subDirs, newBaseDir, opt)\n}\n\nfunc direntSame(dentA, dentB *fsmgr.SeafDirent) bool {\n\treturn dentA.ID == dentB.ID &&\n\t\tdentA.Mode == dentB.Mode &&\n\t\tdentA.Mtime == dentB.Mtime\n}\n\n// Diff type and diff status.\nconst (\n\tDiffTypeCommits = 'C' /* diff between two commits*/\n\n\tDiffStatusAdded      = 'A'\n\tDiffStatusDeleted    = 'D'\n\tDiffStatusModified   = 'M'\n\tDiffStatusRenamed    = 'R'\n\tDiffStatusUnmerged   = 'U'\n\tDiffStatusDirAdded   = 'B'\n\tDiffStatusDirDeleted = 'C'\n\tDiffStatusDirRenamed = 'E'\n)\n\ntype DiffEntry struct {\n\tDiffType   rune\n\tStatus     rune\n\tSha1       string\n\tName       string\n\tNewName    string\n\tSize       int64\n\tOriginSize int64\n}\n\nfunc diffEntryNewFromDirent(diffType, status rune, dent *fsmgr.SeafDirent, baseDir string) *DiffEntry {\n\tde := new(DiffEntry)\n\tde.Sha1 = dent.ID\n\tde.DiffType = diffType\n\tde.Status = status\n\tde.Size = dent.Size\n\tde.Name = filepath.Join(baseDir, dent.Name)\n\n\treturn de\n}\n\nfunc diffEntryNew(diffType, status rune, dirID, name string) *DiffEntry {\n\tde := new(DiffEntry)\n\tde.DiffType = diffType\n\tde.Status = status\n\tde.Sha1 = dirID\n\tde.Name = name\n\n\treturn de\n}\n\nfunc DiffMergeRoots(storeID, mergedRoot, p1Root, p2Root string, results *[]*DiffEntry, foldDirDiff bool) error {\n\troots := []string{mergedRoot, p1Root, p2Root}\n\n\topt := new(DiffOptions)\n\topt.RepoID = storeID\n\topt.FileCB = threewayDiffFiles\n\topt.DirCB = threewayDiffDirs\n\topt.Data = 
diffData{foldDirDiff, results}\n\n\terr := DiffTrees(roots, opt)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to diff trees: %v\", err)\n\t\treturn err\n\t}\n\n\tdiffResolveRenames(results)\n\n\treturn nil\n}\n\nfunc threewayDiffFiles(ctx context.Context, baseDir string, dents []*fsmgr.SeafDirent, optData interface{}) error {\n\tm := dents[0]\n\tp1 := dents[1]\n\tp2 := dents[2]\n\tdata, ok := optData.(diffData)\n\tif !ok {\n\t\terr := fmt.Errorf(\"failed to assert diff data\")\n\t\treturn err\n\t}\n\tresults := data.results\n\n\tif m != nil && p1 != nil && p2 != nil {\n\t\tif !direntSame(m, p1) && !direntSame(m, p2) {\n\t\t\tde := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusModified, m, baseDir)\n\t\t\t*results = append(*results, de)\n\t\t}\n\t} else if m == nil && p1 != nil && p2 != nil {\n\t\tde := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusDeleted, p1, baseDir)\n\t\t*results = append(*results, de)\n\t} else if m != nil && p1 == nil && p2 != nil {\n\t\tif !direntSame(m, p2) {\n\t\t\tde := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusModified, m, baseDir)\n\t\t\t*results = append(*results, de)\n\t\t}\n\t} else if m != nil && p1 != nil && p2 == nil {\n\t\tif !direntSame(m, p1) {\n\t\t\tde := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusModified, m, baseDir)\n\t\t\t*results = append(*results, de)\n\t\t}\n\t} else if m != nil && p1 == nil && p2 == nil {\n\t\tde := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusAdded, m, baseDir)\n\t\t*results = append(*results, de)\n\t}\n\n\treturn nil\n}\n\nfunc threewayDiffDirs(ctx context.Context, baseDir string, dents []*fsmgr.SeafDirent, optData interface{}, recurse *bool) error {\n\t*recurse = true\n\treturn nil\n}\n\nfunc DiffCommitRoots(storeID, p1Root, p2Root string, results *[]*DiffEntry, foldDirDiff bool) error {\n\troots := []string{p1Root, p2Root}\n\n\topt := new(DiffOptions)\n\topt.RepoID = storeID\n\topt.FileCB = twowayDiffFiles\n\topt.DirCB = twowayDiffDirs\n\topt.Data = 
diffData{foldDirDiff, results}\n\n\terr := DiffTrees(roots, opt)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to diff trees: %v\", err)\n\t\treturn err\n\t}\n\n\tdiffResolveRenames(results)\n\n\treturn nil\n}\n\nfunc DiffCommits(commit1, commit2 *commitmgr.Commit, results *[]*DiffEntry, foldDirDiff bool) error {\n\trepo := repomgr.Get(commit1.RepoID)\n\tif repo == nil {\n\t\terr := fmt.Errorf(\"failed to get repo %s\", commit1.RepoID)\n\t\treturn err\n\t}\n\troots := []string{commit1.RootID, commit2.RootID}\n\n\topt := new(DiffOptions)\n\topt.RepoID = repo.StoreID\n\topt.FileCB = twowayDiffFiles\n\topt.DirCB = twowayDiffDirs\n\topt.Data = diffData{foldDirDiff, results}\n\n\terr := DiffTrees(roots, opt)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to diff trees: %v\", err)\n\t\treturn err\n\t}\n\n\tdiffResolveRenames(results)\n\n\treturn nil\n}\n\nfunc twowayDiffFiles(ctx context.Context, baseDir string, dents []*fsmgr.SeafDirent, optData interface{}) error {\n\tp1 := dents[0]\n\tp2 := dents[1]\n\tdata, ok := optData.(diffData)\n\tif !ok {\n\t\terr := fmt.Errorf(\"failed to assert diff data\")\n\t\treturn err\n\t}\n\tresults := data.results\n\n\tif p1 == nil {\n\t\tde := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusAdded, p2, baseDir)\n\t\t*results = append(*results, de)\n\t\treturn nil\n\t}\n\n\tif p2 == nil {\n\t\tde := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusDeleted, p1, baseDir)\n\t\t*results = append(*results, de)\n\t\treturn nil\n\t}\n\n\tif !direntSame(p1, p2) {\n\t\tde := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusModified, p2, baseDir)\n\t\tde.OriginSize = p1.Size\n\t\t*results = append(*results, de)\n\t}\n\n\treturn nil\n}\n\nfunc twowayDiffDirs(ctx context.Context, baseDir string, dents []*fsmgr.SeafDirent, optData interface{}, recurse *bool) error {\n\tp1 := dents[0]\n\tp2 := dents[1]\n\tdata, ok := optData.(diffData)\n\tif !ok {\n\t\terr := fmt.Errorf(\"failed to assert diff data\")\n\t\treturn err\n\t}\n\tresults := 
data.results\n\n\tif p1 == nil {\n\t\tif p2.ID == EmptySha1 || data.foldDirDiff {\n\t\t\tde := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusDirAdded, p2, baseDir)\n\t\t\t*results = append(*results, de)\n\t\t\t*recurse = false\n\t\t} else {\n\t\t\t*recurse = true\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tif p2 == nil {\n\t\tde := diffEntryNewFromDirent(DiffTypeCommits, DiffStatusDirDeleted, p1, baseDir)\n\t\t*results = append(*results, de)\n\t\tif data.foldDirDiff {\n\t\t\t*recurse = false\n\t\t} else {\n\t\t\t*recurse = true\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc diffResolveRenames(des *[]*DiffEntry) error {\n\tvar deletedEmptyCount, deletedEmptyDirCount, addedEmptyCount, addedEmptyDirCount int\n\tfor _, de := range *des {\n\t\tif de.Sha1 == EmptySha1 {\n\t\t\tif de.Status == DiffStatusDeleted {\n\t\t\t\tdeletedEmptyCount++\n\t\t\t}\n\t\t\tif de.Status == DiffStatusDirDeleted {\n\t\t\t\tdeletedEmptyDirCount++\n\t\t\t}\n\t\t\tif de.Status == DiffStatusAdded {\n\t\t\t\taddedEmptyCount++\n\t\t\t}\n\t\t\tif de.Status == DiffStatusDirAdded {\n\t\t\t\taddedEmptyDirCount++\n\t\t\t}\n\t\t}\n\t}\n\n\tdeletedFiles := make(map[string]*DiffEntry)\n\tdeletedDirs := make(map[string]*DiffEntry)\n\tvar results []*DiffEntry\n\tvar added []*DiffEntry\n\n\tcheckEmptyDir := (deletedEmptyDirCount == 1 && addedEmptyDirCount == 1)\n\tcheckEmptyFile := (deletedEmptyCount == 1 && addedEmptyCount == 1)\n\n\tfor _, de := range *des {\n\t\tif de.Status == DiffStatusDeleted {\n\t\t\tif de.Sha1 == EmptySha1 && !checkEmptyFile {\n\t\t\t\tresults = append(results, de)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif _, ok := deletedFiles[de.Sha1]; ok {\n\t\t\t\tresults = append(results, de)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tdeletedFiles[de.Sha1] = de\n\t\t}\n\n\t\tif de.Status == DiffStatusDirDeleted {\n\t\t\tif de.Sha1 == EmptySha1 && !checkEmptyDir {\n\t\t\t\tresults = append(results, de)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif _, ok := deletedDirs[de.Sha1]; ok {\n\t\t\t\tresults = append(results, 
de)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tdeletedDirs[de.Sha1] = de\n\t\t}\n\n\t\tif de.Status == DiffStatusAdded {\n\t\t\tif de.Sha1 == EmptySha1 && !checkEmptyFile {\n\t\t\t\tresults = append(results, de)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tadded = append(added, de)\n\t\t}\n\n\t\tif de.Status == DiffStatusDirAdded {\n\t\t\tif de.Sha1 == EmptySha1 && !checkEmptyDir {\n\t\t\t\tresults = append(results, de)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tadded = append(added, de)\n\t\t}\n\n\t\tif de.Status == DiffStatusModified {\n\t\t\tresults = append(results, de)\n\t\t}\n\t}\n\n\tfor _, de := range added {\n\t\tvar deAdd, deDel, deRename *DiffEntry\n\t\tvar renameStatus rune\n\n\t\tdeAdd = de\n\t\tif deAdd.Status == DiffStatusAdded {\n\t\t\tdeTmp, ok := deletedFiles[de.Sha1]\n\t\t\tif !ok {\n\t\t\t\tresults = append(results, deAdd)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tdeDel = deTmp\n\t\t} else {\n\t\t\tdeTmp, ok := deletedDirs[de.Sha1]\n\t\t\tif !ok {\n\t\t\t\tresults = append(results, deAdd)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tdeDel = deTmp\n\t\t}\n\n\t\tif deAdd.Status == DiffStatusDirAdded {\n\t\t\trenameStatus = DiffStatusDirRenamed\n\t\t} else {\n\t\t\trenameStatus = DiffStatusRenamed\n\t\t}\n\n\t\tdeRename = diffEntryNew(deDel.DiffType, renameStatus, deDel.Sha1, deDel.Name)\n\t\tdeRename.NewName = de.Name\n\t\tresults = append(results, deRename)\n\t\tif deDel.Status == DiffStatusDirDeleted {\n\t\t\tdelete(deletedDirs, deAdd.Sha1)\n\t\t} else {\n\t\t\tdelete(deletedFiles, deAdd.Sha1)\n\t\t}\n\t}\n\n\tfor _, de := range deletedFiles {\n\t\tresults = append(results, de)\n\t}\n\n\tfor _, de := range deletedDirs {\n\t\tresults = append(results, de)\n\t}\n\t*des = results\n\n\treturn nil\n}\n\nfunc DiffResultsToDesc(results []*DiffEntry) string {\n\tvar nAddMod, nRemoved, nRenamed int\n\tvar nNewDir, nRemovedDir int\n\tvar addModFile, removedFile string\n\tvar renamedFile string\n\tvar newDir, removedDir string\n\tvar desc string\n\n\tif results == nil {\n\t\treturn \"\"\n\t}\n\n\tfor 
_, de := range results {\n\t\tswitch de.Status {\n\t\tcase DiffStatusAdded:\n\t\t\tif nAddMod == 0 {\n\t\t\t\taddModFile = filepath.Base(de.Name)\n\t\t\t}\n\t\t\tnAddMod++\n\t\tcase DiffStatusDeleted:\n\t\t\tif nRemoved == 0 {\n\t\t\t\tremovedFile = filepath.Base(de.Name)\n\t\t\t}\n\t\t\tnRemoved++\n\t\tcase DiffStatusRenamed:\n\t\t\tif nRenamed == 0 {\n\t\t\t\trenamedFile = filepath.Base(de.Name)\n\t\t\t}\n\t\t\tnRenamed++\n\t\tcase DiffStatusModified:\n\t\t\tif nAddMod == 0 {\n\t\t\t\taddModFile = filepath.Base(de.Name)\n\t\t\t}\n\t\t\tnAddMod++\n\t\tcase DiffStatusDirAdded:\n\t\t\tif nNewDir == 0 {\n\t\t\t\tnewDir = filepath.Base(de.Name)\n\t\t\t}\n\t\t\tnNewDir++\n\t\tcase DiffStatusDirDeleted:\n\t\t\tif nRemovedDir == 0 {\n\t\t\t\tremovedDir = filepath.Base(de.Name)\n\t\t\t}\n\t\t\tnRemovedDir++\n\t\t}\n\t}\n\n\tif nAddMod == 1 {\n\t\tdesc = fmt.Sprintf(\"Added or modified \\\"%s\\\".\\n\", addModFile)\n\t} else if nAddMod > 1 {\n\t\tdesc = fmt.Sprintf(\"Added or modified \\\"%s\\\" and %d more files.\\n\", addModFile, nAddMod-1)\n\t}\n\n\tif nRemoved == 1 {\n\t\tdesc += fmt.Sprintf(\"Deleted \\\"%s\\\".\\n\", removedFile)\n\t} else if nRemoved > 1 {\n\t\tdesc += fmt.Sprintf(\"Deleted \\\"%s\\\" and %d more files.\\n\", removedFile, nRemoved-1)\n\t}\n\n\tif nRenamed == 1 {\n\t\tdesc += fmt.Sprintf(\"Renamed \\\"%s\\\".\\n\", renamedFile)\n\t} else if nRenamed > 1 {\n\t\tdesc += fmt.Sprintf(\"Renamed \\\"%s\\\" and %d more files.\\n\", renamedFile, nRenamed-1)\n\t}\n\n\tif nNewDir == 1 {\n\t\tdesc += fmt.Sprintf(\"Added directory \\\"%s\\\".\\n\", newDir)\n\t} else if nNewDir > 1 {\n\t\tdesc += fmt.Sprintf(\"Added \\\"%s\\\" and %d more directories.\\n\", newDir, nNewDir-1)\n\t}\n\n\tif nRemovedDir == 1 {\n\t\tdesc += fmt.Sprintf(\"Removed directory \\\"%s\\\".\\n\", removedDir)\n\t} else if nRemovedDir > 1 {\n\t\tdesc += fmt.Sprintf(\"Removed \\\"%s\\\" and %d more directories.\\n\", removedDir, nRemovedDir-1)\n\t}\n\n\treturn desc\n}\n"
  },
  {
    "path": "fileserver/diff/diff_test.go",
    "content": "package diff\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"syscall\"\n\t\"testing\"\n\n\t\"github.com/haiwen/seafile-server/fileserver/fsmgr\"\n)\n\nconst (\n\temptySHA1               = \"0000000000000000000000000000000000000000\"\n\tdiffTestSeafileConfPath = \"/tmp/conf\"\n\tdiffTestSeafileDataDir  = \"/tmp/conf/seafile-data\"\n\tdiffTestRepoID          = \"0d18a711-c988-4f7b-960c-211b34705ce3\"\n)\n\nvar diffTestTree1 string\nvar diffTestTree2 string\nvar diffTestTree3 string\nvar diffTestTree4 string\nvar diffTestFileID string\nvar diffTestDirID1 string\nvar diffTestDirID2 string\n\n/*\n   test directory structure:\n\n   tree1\n   |--\n\n   tree2\n   |--file\n\n   tree3\n   |--dir\n\n   tree4\n   |--dir\n      |-- file\n\n*/\n\nfunc TestDiffTrees(t *testing.T) {\n\tfsmgr.Init(diffTestSeafileConfPath, diffTestSeafileDataDir, 2<<30)\n\n\terr := diffTestCreateTestDir()\n\tif err != nil {\n\t\tfmt.Printf(\"failed to create test dir: %v\", err)\n\t\tos.Exit(1)\n\t}\n\n\tt.Run(\"test1\", testDiffTrees1)\n\tt.Run(\"test2\", testDiffTrees2)\n\tt.Run(\"test3\", testDiffTrees3)\n\tt.Run(\"test4\", testDiffTrees4)\n\tt.Run(\"test5\", testDiffTrees5)\n\n\terr = diffTestDelFile()\n\tif err != nil {\n\t\tfmt.Printf(\"failed to remove test file : %v\", err)\n\t}\n}\n\nfunc diffTestCreateTestDir() error {\n\tmodeDir := uint32(syscall.S_IFDIR | 0644)\n\tmodeFile := uint32(syscall.S_IFREG | 0644)\n\n\tdir1, err := diffTestCreateSeafdir(nil)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to get seafdir: %v\", err)\n\t\treturn err\n\t}\n\tdiffTestTree1 = dir1\n\tfile1, err := fsmgr.NewSeafile(1, 1, nil)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to new seafile: %v\", err)\n\t\treturn err\n\t}\n\tdiffTestFileID = file1.FileID\n\terr = fsmgr.SaveSeafile(diffTestRepoID, file1)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to save seafile: %v\", err)\n\t\treturn err\n\t}\n\tdent1 := fsmgr.SeafDirent{ID: file1.FileID, Name: \"file\", Mode: 
modeFile, Size: 1}\n\tdir2, err := diffTestCreateSeafdir([]*fsmgr.SeafDirent{&dent1})\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to get seafdir: %v\", err)\n\t\treturn err\n\t}\n\tdiffTestTree2 = dir2\n\n\tdent2 := fsmgr.SeafDirent{ID: dir1, Name: \"dir\", Mode: modeDir}\n\tdiffTestDirID1 = dir1\n\tdir3, err := diffTestCreateSeafdir([]*fsmgr.SeafDirent{&dent2})\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to get seafdir: %v\", err)\n\t\treturn err\n\t}\n\tdiffTestTree3 = dir3\n\n\tdent3 := fsmgr.SeafDirent{ID: dir2, Name: \"dir\", Mode: modeDir}\n\tdiffTestDirID2 = dir2\n\tdir4, err := diffTestCreateSeafdir([]*fsmgr.SeafDirent{&dent3})\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to get seafdir: %v\", err)\n\t\treturn err\n\t}\n\tdiffTestTree4 = dir4\n\n\treturn nil\n}\n\nfunc testDiffTrees1(t *testing.T) {\n\tvar results []interface{}\n\topt := &DiffOptions{\n\t\tFileCB: diffTestFileCB,\n\t\tDirCB:  diffTestDirCB,\n\t\tRepoID: diffTestRepoID}\n\topt.Data = &results\n\tDiffTrees([]string{diffTestTree2, diffTestTree1}, opt)\n\tif len(results) != 1 {\n\t\tt.Errorf(\"data length is %d not 1\", len(results))\n\t}\n\tvar ret = make([]string, len(results))\n\tfor k, v := range results {\n\t\tret[k] = fmt.Sprintf(\"%s\", v)\n\t}\n\tif ret[0] != diffTestFileID {\n\t\tt.Errorf(\"result %s != %s\", ret[0], diffTestFileID)\n\t}\n}\n\nfunc testDiffTrees2(t *testing.T) {\n\tvar results []interface{}\n\topt := &DiffOptions{\n\t\tFileCB: diffTestFileCB,\n\t\tDirCB:  diffTestDirCB,\n\t\tRepoID: diffTestRepoID}\n\topt.Data = &results\n\tDiffTrees([]string{diffTestTree3, diffTestTree1}, opt)\n\tif len(results) != 1 {\n\t\tt.Errorf(\"data length is %d not 1\", len(results))\n\t}\n\tvar ret = make([]string, len(results))\n\tfor k, v := range results {\n\t\tret[k] = fmt.Sprintf(\"%s\", v)\n\t}\n\tif ret[0] != diffTestDirID1 {\n\t\tt.Errorf(\"result %s != %s\", ret[0], diffTestDirID1)\n\t}\n\n}\n\nfunc testDiffTrees3(t *testing.T) {\n\tvar results 
[]interface{}\n\topt := &DiffOptions{\n\t\tFileCB: diffTestFileCB,\n\t\tDirCB:  diffTestDirCB,\n\t\tRepoID: diffTestRepoID}\n\topt.Data = &results\n\tDiffTrees([]string{diffTestTree4, diffTestTree1}, opt)\n\tif len(results) != 2 {\n\t\tt.Errorf(\"data length is %d not 1\", len(results))\n\t}\n\n\tvar ret = make([]string, len(results))\n\tfor k, v := range results {\n\t\tret[k] = fmt.Sprintf(\"%s\", v)\n\t}\n\tif ret[0] != diffTestDirID2 {\n\t\tt.Errorf(\"result %s != %s\", ret[0], diffTestDirID2)\n\t}\n\tif ret[1] != diffTestFileID {\n\t\tt.Errorf(\"result %s != %s\", ret[1], diffTestFileID)\n\t}\n}\n\nfunc testDiffTrees4(t *testing.T) {\n\tvar results []interface{}\n\topt := &DiffOptions{\n\t\tFileCB: diffTestFileCB,\n\t\tDirCB:  diffTestDirCB,\n\t\tRepoID: diffTestRepoID}\n\topt.Data = &results\n\tDiffTrees([]string{diffTestTree4, diffTestTree3}, opt)\n\tif len(results) != 2 {\n\t\tt.Errorf(\"data length is %d not 1\", len(results))\n\t}\n\n\tvar ret = make([]string, len(results))\n\tfor k, v := range results {\n\t\tret[k] = fmt.Sprintf(\"%s\", v)\n\t}\n\tif ret[0] != diffTestDirID2 {\n\t\tt.Errorf(\"result %s != %s\", ret[0], diffTestDirID2)\n\t}\n\tif ret[1] != diffTestFileID {\n\t\tt.Errorf(\"result %s != %s\", ret[1], diffTestFileID)\n\t}\n}\n\nfunc testDiffTrees5(t *testing.T) {\n\tvar results []interface{}\n\topt := &DiffOptions{\n\t\tFileCB: diffTestFileCB,\n\t\tDirCB:  diffTestDirCB,\n\t\tRepoID: diffTestRepoID}\n\topt.Data = &results\n\tDiffTrees([]string{diffTestTree3, diffTestTree2}, opt)\n\tif len(results) != 1 {\n\t\tt.Errorf(\"data length is %d not 1\", len(results))\n\t}\n\tvar ret = make([]string, len(results))\n\tfor k, v := range results {\n\t\tret[k] = fmt.Sprintf(\"%s\", v)\n\t}\n\tif ret[0] != diffTestDirID1 {\n\t\tt.Errorf(\"result %s != %s\", ret[0], diffTestDirID1)\n\t}\n}\n\nfunc diffTestCreateSeafdir(dents []*fsmgr.SeafDirent) (string, error) {\n\tseafdir, err := fsmgr.NewSeafdir(1, dents)\n\tif err != nil {\n\t\treturn \"\", 
err\n\t}\n\terr = fsmgr.SaveSeafdir(diffTestRepoID, seafdir)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn seafdir.DirID, nil\n}\n\nfunc diffTestDelFile() error {\n\terr := os.RemoveAll(diffTestSeafileConfPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc diffTestFileCB(ctx context.Context, baseDir string, files []*fsmgr.SeafDirent, data interface{}) error {\n\tfile1 := files[0]\n\tfile2 := files[1]\n\tresults, ok := data.(*[]interface{})\n\tif !ok {\n\t\terr := fmt.Errorf(\"failed to assert results\")\n\t\treturn err\n\t}\n\n\tif file1 != nil &&\n\t\t(file2 == nil || file1.ID != file2.ID) {\n\t\t*results = append(*results, file1.ID)\n\t}\n\n\treturn nil\n}\n\nfunc diffTestDirCB(ctx context.Context, baseDir string, dirs []*fsmgr.SeafDirent, data interface{}, recurse *bool) error {\n\tdir1 := dirs[0]\n\tdir2 := dirs[1]\n\tresults, ok := data.(*[]interface{})\n\tif !ok {\n\t\terr := fmt.Errorf(\"failed to assert results\")\n\t\treturn err\n\t}\n\n\tif dir1 != nil &&\n\t\t(dir2 == nil || dir1.ID != dir2.ID) {\n\t\t*results = append(*results, dir1.ID)\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "fileserver/fileop.go",
    "content": "package main\n\nimport (\n\t\"archive/zip\"\n\t\"bytes\"\n\t\"context\"\n\t\"crypto/sha1\"\n\t\"encoding/hex\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"mime\"\n\t\"mime/multipart\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"runtime/debug\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\t\"unicode/utf8\"\n\n\t\"database/sql\"\n\t\"math/rand\"\n\t\"sort\"\n\t\"syscall\"\n\n\t\"github.com/gorilla/mux\"\n\t\"github.com/haiwen/seafile-server/fileserver/blockmgr\"\n\t\"github.com/haiwen/seafile-server/fileserver/commitmgr\"\n\t\"github.com/haiwen/seafile-server/fileserver/diff\"\n\t\"github.com/haiwen/seafile-server/fileserver/fsmgr\"\n\t\"github.com/haiwen/seafile-server/fileserver/option\"\n\t\"github.com/haiwen/seafile-server/fileserver/repomgr\"\n\t\"github.com/haiwen/seafile-server/fileserver/utils\"\n\t\"github.com/haiwen/seafile-server/fileserver/workerpool\"\n\tlog \"github.com/sirupsen/logrus\"\n\t\"golang.org/x/text/unicode/norm\"\n)\n\nconst (\n\tcacheBlockMapThreshold          = 1 << 23\n\tblockMapCacheExpiretime   int64 = 3600 * 24\n\tfileopCleaningIntervalSec       = 3600\n\tduplicateNamesCount             = 1000\n)\n\nvar (\n\tblockMapCacheTable sync.Map\n\tindexFilePool      *workerpool.WorkPool\n)\n\n// Dirents is an alias for slice of SeafDirent.\ntype Dirents []*fsmgr.SeafDirent\n\nfunc (d Dirents) Less(i, j int) bool {\n\treturn d[i].Name > d[j].Name\n}\n\nfunc (d Dirents) Swap(i, j int) {\n\td[i], d[j] = d[j], d[i]\n}\nfunc (d Dirents) Len() int {\n\treturn len(d)\n}\n\nfunc fileopInit() {\n\tticker := time.NewTicker(time.Second * fileopCleaningIntervalSec)\n\tgo RecoverWrapper(func() {\n\t\tfor range ticker.C {\n\t\t\tremoveFileopExpireCache()\n\t\t}\n\t})\n\n\tindexFilePool = workerpool.CreateWorkerPool(indexFileWorker, int(option.MaxIndexingFiles))\n}\n\nfunc initUpload() {\n\tobjDir := filepath.Join(dataDir, \"httptemp\", \"cluster-shared\")\n\tos.MkdirAll(objDir, 
os.ModePerm)\n}\n\n// contentType = \"application/octet-stream\"\nfunc parseContentType(fileName string) string {\n\tvar contentType string\n\n\tparts := strings.Split(fileName, \".\")\n\tif len(parts) >= 2 {\n\t\tsuffix := parts[len(parts)-1]\n\t\tsuffix = strings.ToLower(suffix)\n\t\tswitch suffix {\n\t\tcase \"txt\":\n\t\t\tcontentType = \"text/plain\"\n\t\tcase \"doc\":\n\t\t\tcontentType = \"application/vnd.ms-word\"\n\t\tcase \"docx\":\n\t\t\tcontentType = \"application/vnd.openxmlformats-officedocument.wordprocessingml.document\"\n\t\tcase \"ppt\":\n\t\t\tcontentType = \"application/vnd.ms-powerpoint\"\n\t\tcase \"xls\":\n\t\t\tcontentType = \"application/vnd.ms-excel\"\n\t\tcase \"xlsx\":\n\t\t\tcontentType = \"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\"\n\t\tcase \"pdf\":\n\t\t\tcontentType = \"application/pdf\"\n\t\tcase \"zip\":\n\t\t\tcontentType = \"application/zip\"\n\t\tcase \"mp3\":\n\t\t\tcontentType = \"audio/mp3\"\n\t\tcase \"mpeg\":\n\t\t\tcontentType = \"video/mpeg\"\n\t\tcase \"mp4\":\n\t\t\tcontentType = \"video/mp4\"\n\t\tcase \"ogv\":\n\t\t\tcontentType = \"video/ogg\"\n\t\tcase \"mov\":\n\t\t\tcontentType = \"video/mp4\"\n\t\tcase \"webm\":\n\t\t\tcontentType = \"video/webm\"\n\t\tcase \"mkv\":\n\t\t\tcontentType = \"video/x-matroska\"\n\t\tcase \"jpeg\", \"JPEG\", \"jpg\", \"JPG\":\n\t\t\tcontentType = \"image/jpeg\"\n\t\tcase \"png\", \"PNG\":\n\t\t\tcontentType = \"image/png\"\n\t\tcase \"gif\", \"GIF\":\n\t\t\tcontentType = \"image/gif\"\n\t\tcase \"svg\", \"SVG\":\n\t\t\tcontentType = \"image/svg+xml\"\n\t\tcase \"heic\":\n\t\t\tcontentType = \"image/heic\"\n\t\tcase \"ico\":\n\t\t\tcontentType = \"image/x-icon\"\n\t\tcase \"bmp\":\n\t\t\tcontentType = \"image/bmp\"\n\t\tcase \"tif\", \"tiff\":\n\t\t\tcontentType = \"image/tiff\"\n\t\tcase \"psd\":\n\t\t\tcontentType = \"image/vnd.adobe.photoshop\"\n\t\tcase \"webp\":\n\t\t\tcontentType = \"image/webp\"\n\t\tcase \"jfif\":\n\t\t\tcontentType = 
\"image/jpeg\"\n\t\t}\n\t}\n\n\treturn contentType\n}\n\nfunc accessCB(rsp http.ResponseWriter, r *http.Request) *appError {\n\tparts := strings.Split(r.URL.Path[1:], \"/\")\n\tif len(parts) < 3 {\n\t\tmsg := \"Invalid URL\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\ttoken := parts[1]\n\tfileName := parts[2]\n\taccessInfo, err := parseWebaccessInfo(token)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trepoID := accessInfo.repoID\n\top := accessInfo.op\n\tuser := accessInfo.user\n\tobjID := accessInfo.objID\n\n\tif op != \"view\" && op != \"download\" && op != \"download-link\" {\n\t\tmsg := \"Operation does not match access token.\"\n\t\treturn &appError{nil, msg, http.StatusForbidden}\n\t}\n\n\tif _, ok := r.Header[\"If-Modified-Since\"]; ok {\n\t\treturn &appError{nil, \"\", http.StatusNotModified}\n\t}\n\n\tnow := time.Now()\n\trsp.Header().Set(\"ETag\", objID)\n\trsp.Header().Set(\"Last-Modified\", now.Format(\"Mon, 2 Jan 2006 15:04:05 GMT\"))\n\trsp.Header().Set(\"Cache-Control\", \"max-age=3600\")\n\n\tranges := r.Header[\"Range\"]\n\tbyteRanges := strings.Join(ranges, \"\")\n\n\trepo := repomgr.Get(repoID)\n\tif repo == nil {\n\t\tmsg := \"Bad repo id\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\n\tvar cryptKey *seafileCrypt\n\tif repo.IsEncrypted {\n\t\tkey, err := parseCryptKey(rsp, repoID, user, repo.EncVersion)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcryptKey = key\n\t}\n\n\texists, _ := fsmgr.Exists(repo.StoreID, objID)\n\tif !exists {\n\t\tmsg := \"Invalid file id\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\n\tif !repo.IsEncrypted && len(byteRanges) != 0 {\n\t\tif err := doFileRange(rsp, r, repo, objID, fileName, op, byteRanges, user); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else if err := doFile(rsp, r, repo, objID, fileName, op, cryptKey, user); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc parseCryptKey(rsp http.ResponseWriter, repoID string, user string, version 
int) (*seafileCrypt, *appError) {\n\tkey, err := rpcclient.Call(\"seafile_get_decrypt_key\", repoID, user)\n\tif err != nil {\n\t\terrMessage := \"Repo is encrypted. Please provide password to view it.\"\n\t\treturn nil, &appError{nil, errMessage, http.StatusBadRequest}\n\t}\n\n\tcryptKey, ok := key.(map[string]interface{})\n\tif !ok {\n\t\terr := fmt.Errorf(\"failed to assert crypt key\")\n\t\treturn nil, &appError{err, \"\", http.StatusInternalServerError}\n\t}\n\n\tseafileKey := new(seafileCrypt)\n\tseafileKey.version = version\n\n\tif cryptKey != nil {\n\t\tkey, ok := cryptKey[\"key\"].(string)\n\t\tif !ok {\n\t\t\terr := fmt.Errorf(\"failed to parse crypt key\")\n\t\t\treturn nil, &appError{err, \"\", http.StatusInternalServerError}\n\t\t}\n\t\tiv, ok := cryptKey[\"iv\"].(string)\n\t\tif !ok {\n\t\t\terr := fmt.Errorf(\"failed to parse crypt iv\")\n\t\t\treturn nil, &appError{err, \"\", http.StatusInternalServerError}\n\t\t}\n\t\tseafileKey.key, err = hex.DecodeString(key)\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"failed to decode key: %v\", err)\n\t\t\treturn nil, &appError{err, \"\", http.StatusInternalServerError}\n\t\t}\n\t\tseafileKey.iv, err = hex.DecodeString(iv)\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"failed to decode iv: %v\", err)\n\t\t\treturn nil, &appError{err, \"\", http.StatusInternalServerError}\n\t\t}\n\t}\n\n\treturn seafileKey, nil\n}\n\nfunc accessV2CB(rsp http.ResponseWriter, r *http.Request) *appError {\n\tvars := mux.Vars(r)\n\trepoID := vars[\"repoid\"]\n\tfilePath := vars[\"filepath\"]\n\n\tif filePath == \"\" {\n\t\tmsg := \"No file path\\n\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\trpath := getCanonPath(filePath)\n\tfileName := filepath.Base(rpath)\n\n\top := r.URL.Query().Get(\"op\")\n\tif op != \"view\" && op != \"download\" {\n\t\tmsg := \"Operation is neither view or download\\n\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\n\ttoken := 
utils.GetAuthorizationToken(r.Header)\n\tcookie := r.Header.Get(\"Cookie\")\n\n\tif token == \"\" && cookie == \"\" {\n\t\tmsg := \"Both token and cookie are not set\\n\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\n\tipAddr := getClientIPAddr(r)\n\tuserAgent := r.Header.Get(\"User-Agent\")\n\tuser, appErr := checkFileAccess(repoID, token, cookie, filePath, \"download\", ipAddr, userAgent)\n\tif appErr != nil {\n\t\treturn appErr\n\t}\n\n\trepo := repomgr.Get(repoID)\n\tif repo == nil {\n\t\tmsg := \"Bad repo id\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\n\tfileID, _, err := fsmgr.GetObjIDByPath(repo.StoreID, repo.RootID, rpath)\n\tif err != nil {\n\t\tmsg := \"Invalid file_path\\n\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\n\tetag := r.Header.Get(\"If-None-Match\")\n\tif etag == fileID {\n\t\treturn &appError{nil, \"\", http.StatusNotModified}\n\t}\n\n\trsp.Header().Set(\"ETag\", fileID)\n\trsp.Header().Set(\"Cache-Control\", \"private, no-cache\")\n\n\tranges := r.Header[\"Range\"]\n\tbyteRanges := strings.Join(ranges, \"\")\n\n\tvar cryptKey *seafileCrypt\n\tif repo.IsEncrypted {\n\t\tkey, err := parseCryptKey(rsp, repoID, user, repo.EncVersion)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcryptKey = key\n\t}\n\n\texists, _ := fsmgr.Exists(repo.StoreID, fileID)\n\tif !exists {\n\t\tmsg := \"Invalid file id\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\n\tif !repo.IsEncrypted && len(byteRanges) != 0 {\n\t\tif err := doFileRange(rsp, r, repo, fileID, fileName, op, byteRanges, user); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else if err := doFile(rsp, r, repo, fileID, fileName, op, cryptKey, user); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\ntype UserInfo struct {\n\tUser string `json:\"user\"`\n}\n\nfunc checkFileAccess(repoID, token, cookie, filePath, op, ipAddr, userAgent string) (string, *appError) {\n\ttokenString, err := utils.GenSeahubJWTToken()\n\tif err != nil 
{\n\t\terr := fmt.Errorf(\"failed to sign jwt token: %v\", err)\n\t\treturn \"\", &appError{err, \"\", http.StatusInternalServerError}\n\t}\n\turl := fmt.Sprintf(\"%s/repos/%s/check-access/\", option.SeahubURL, repoID)\n\theader := map[string][]string{\n\t\t\"Authorization\": {\"Token \" + tokenString},\n\t}\n\tif cookie != \"\" {\n\t\theader[\"Cookie\"] = []string{cookie}\n\t}\n\treq := make(map[string]string)\n\treq[\"op\"] = op\n\treq[\"path\"] = filePath\n\tif token != \"\" {\n\t\treq[\"token\"] = token\n\t}\n\tif ipAddr != \"\" {\n\t\treq[\"ip_addr\"] = ipAddr\n\t}\n\tif userAgent != \"\" {\n\t\treq[\"user_agent\"] = userAgent\n\t}\n\tmsg, err := json.Marshal(req)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to encode access token: %v\", err)\n\t\treturn \"\", &appError{err, \"\", http.StatusInternalServerError}\n\t}\n\tstatus, body, err := utils.HttpCommon(\"POST\", url, header, bytes.NewReader(msg))\n\tif err != nil {\n\t\tif status != http.StatusInternalServerError {\n\t\t\treturn \"\", &appError{nil, string(body), status}\n\t\t} else {\n\t\t\terr := fmt.Errorf(\"failed to get access token info: %v\", err)\n\t\t\treturn \"\", &appError{err, \"\", http.StatusInternalServerError}\n\t\t}\n\t}\n\n\tinfo := new(UserInfo)\n\terr = json.Unmarshal(body, &info)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to decode access token info: %v\", err)\n\t\treturn \"\", &appError{err, \"\", http.StatusInternalServerError}\n\t}\n\n\treturn info.User, nil\n}\n\nfunc doFile(rsp http.ResponseWriter, r *http.Request, repo *repomgr.Repo, fileID string,\n\tfileName string, operation string, cryptKey *seafileCrypt, user string) *appError {\n\tfile, err := fsmgr.GetSeafile(repo.StoreID, fileID)\n\tif err != nil {\n\t\tmsg := \"Failed to get seafile\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\n\trsp.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\n\tfileType := parseContentType(fileName)\n\tif fileType == \"image/svg+xml\" 
{\n\t\trsp.Header().Set(\"Content-Security-Policy\", \"sandbox\")\n\t}\n\n\tsetCommonHeaders(rsp, r, operation, fileName)\n\n\t//filesize string\n\tfileSize := fmt.Sprintf(\"%d\", file.FileSize)\n\trsp.Header().Set(\"Content-Length\", fileSize)\n\n\tif r.Method == \"HEAD\" {\n\t\trsp.WriteHeader(http.StatusOK)\n\t\treturn nil\n\t}\n\tif file.FileSize == 0 {\n\t\trsp.WriteHeader(http.StatusOK)\n\t\treturn nil\n\t}\n\n\tif cryptKey != nil {\n\t\tfor _, blkID := range file.BlkIDs {\n\t\t\tvar buf bytes.Buffer\n\t\t\tblockmgr.Read(repo.StoreID, blkID, &buf)\n\t\t\tdecoded, err := cryptKey.decrypt(buf.Bytes())\n\t\t\tif err != nil {\n\t\t\t\terr := fmt.Errorf(\"failed to decrypt block %s: %v\", blkID, err)\n\t\t\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t\t\t}\n\t\t\t_, err = rsp.Write(decoded)\n\t\t\tif err != nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\n\tfor _, blkID := range file.BlkIDs {\n\t\terr := blockmgr.Read(repo.StoreID, blkID, rsp)\n\t\tif err != nil {\n\t\t\tif !isNetworkErr(err) {\n\t\t\t\tlog.Errorf(\"failed to read block %s: %v\", blkID, err)\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t}\n\n\toper := \"web-file-download\"\n\tif operation == \"download-link\" {\n\t\toper = \"link-file-download\"\n\t}\n\tsendStatisticMsg(repo.StoreID, user, oper, file.FileSize)\n\n\treturn nil\n}\n\nfunc isNetworkErr(err error) bool {\n\t_, ok := err.(net.Error)\n\treturn ok\n}\n\ntype blockMap struct {\n\tblkSize    []uint64\n\texpireTime int64\n}\n\nfunc doFileRange(rsp http.ResponseWriter, r *http.Request, repo *repomgr.Repo, fileID string,\n\tfileName string, operation string, byteRanges string, user string) *appError {\n\n\tfile, err := fsmgr.GetSeafile(repo.StoreID, fileID)\n\tif err != nil {\n\t\tmsg := \"Failed to get seafile\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\n\tif file.FileSize == 0 {\n\t\trsp.WriteHeader(http.StatusOK)\n\t\treturn nil\n\t}\n\n\tstart, end, ok := parseRange(byteRanges, 
file.FileSize)\n\tif !ok {\n\t\tconRange := fmt.Sprintf(\"bytes */%d\", file.FileSize)\n\t\trsp.Header().Set(\"Content-Range\", conRange)\n\t\treturn &appError{nil, \"\", http.StatusRequestedRangeNotSatisfiable}\n\t}\n\n\trsp.Header().Set(\"Accept-Ranges\", \"bytes\")\n\n\tfileType := parseContentType(fileName)\n\tif fileType == \"image/svg+xml\" {\n\t\trsp.Header().Set(\"Content-Security-Policy\", \"sandbox\")\n\t}\n\n\tsetCommonHeaders(rsp, r, operation, fileName)\n\n\t//filesize string\n\tconLen := fmt.Sprintf(\"%d\", end-start+1)\n\trsp.Header().Set(\"Content-Length\", conLen)\n\n\tconRange := fmt.Sprintf(\"bytes %d-%d/%d\", start, end, file.FileSize)\n\trsp.Header().Set(\"Content-Range\", conRange)\n\n\trsp.WriteHeader(http.StatusPartialContent)\n\n\tvar blkSize []uint64\n\tif file.FileSize > cacheBlockMapThreshold {\n\t\tif v, ok := blockMapCacheTable.Load(file.FileID); ok {\n\t\t\tif blkMap, ok := v.(*blockMap); ok {\n\t\t\t\tblkSize = blkMap.blkSize\n\t\t\t}\n\t\t}\n\t\tif len(blkSize) == 0 {\n\t\t\tfor _, v := range file.BlkIDs {\n\t\t\t\tsize, err := blockmgr.Stat(repo.StoreID, v)\n\t\t\t\tif err != nil {\n\t\t\t\t\terr := fmt.Errorf(\"failed to stat block %s : %v\", v, err)\n\t\t\t\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t\t\t\t}\n\t\t\t\tblkSize = append(blkSize, uint64(size))\n\t\t\t}\n\t\t\tblockMapCacheTable.Store(file.FileID, &blockMap{blkSize, time.Now().Unix() + blockMapCacheExpiretime})\n\t\t}\n\t} else {\n\t\tfor _, v := range file.BlkIDs {\n\t\t\tsize, err := blockmgr.Stat(repo.StoreID, v)\n\t\t\tif err != nil {\n\t\t\t\terr := fmt.Errorf(\"failed to stat block %s : %v\", v, err)\n\t\t\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t\t\t}\n\t\t\tblkSize = append(blkSize, uint64(size))\n\t\t}\n\t}\n\n\tvar off uint64\n\tvar pos uint64\n\tvar startBlock int\n\tfor i, v := range blkSize {\n\t\tpos = start - off\n\t\toff += v\n\t\tif off > start {\n\t\t\tstartBlock = i\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Read 
block from the start block and specified position\n\tvar i int\n\tfor ; i < len(file.BlkIDs); i++ {\n\t\tif i < startBlock {\n\t\t\tcontinue\n\t\t}\n\n\t\tblkID := file.BlkIDs[i]\n\t\tvar buf bytes.Buffer\n\t\tif end-start+1 <= blkSize[i]-pos {\n\t\t\terr := blockmgr.Read(repo.StoreID, blkID, &buf)\n\t\t\tif err != nil {\n\t\t\t\tif !isNetworkErr(err) {\n\t\t\t\t\tlog.Errorf(\"failed to read block %s: %v\", blkID, err)\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\trecvBuf := buf.Bytes()\n\t\t\trsp.Write(recvBuf[pos : pos+end-start+1])\n\t\t\treturn nil\n\t\t}\n\n\t\terr := blockmgr.Read(repo.StoreID, blkID, &buf)\n\t\tif err != nil {\n\t\t\tif !isNetworkErr(err) {\n\t\t\t\tlog.Errorf(\"failed to read block %s: %v\", blkID, err)\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t\trecvBuf := buf.Bytes()\n\t\t_, err = rsp.Write(recvBuf[pos:])\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\tstart += blkSize[i] - pos\n\t\ti++\n\t\tbreak\n\t}\n\n\t// Always read block from the remaining block and pos=0\n\tfor ; i < len(file.BlkIDs); i++ {\n\t\tblkID := file.BlkIDs[i]\n\t\tvar buf bytes.Buffer\n\t\tif end-start+1 <= blkSize[i] {\n\t\t\terr := blockmgr.Read(repo.StoreID, blkID, &buf)\n\t\t\tif err != nil {\n\t\t\t\tif !isNetworkErr(err) {\n\t\t\t\t\tlog.Errorf(\"failed to read block %s: %v\", blkID, err)\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\trecvBuf := buf.Bytes()\n\t\t\t_, err = rsp.Write(recvBuf[:end-start+1])\n\t\t\tif err != nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tbreak\n\t\t} else {\n\t\t\terr := blockmgr.Read(repo.StoreID, blkID, rsp)\n\t\t\tif err != nil {\n\t\t\t\tif !isNetworkErr(err) {\n\t\t\t\t\tlog.Errorf(\"failed to read block %s: %v\", blkID, err)\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tstart += blkSize[i]\n\t\t}\n\t}\n\n\toper := \"web-file-download\"\n\tif operation == \"download-link\" {\n\t\toper = \"link-file-download\"\n\t}\n\tsendStatisticMsg(repo.StoreID, user, oper, end-start+1)\n\n\treturn nil\n}\n\nfunc parseRange(byteRanges string, 
// parseRange parses a single HTTP Range header value ("bytes=start-end",
// "bytes=start-" or "bytes=-suffixLen") against a file of fileSize bytes.
// It returns the inclusive start/end byte offsets and whether the range is
// satisfiable.  Multi-range specs ("bytes=0-1,5-6") are not supported; only
// the first '-' is honored.  Callers only invoke this with fileSize > 0.
func parseRange(byteRanges string, fileSize uint64) (uint64, uint64, bool) {
	start := strings.Index(byteRanges, "=")
	end := strings.Index(byteRanges, "-")

	if end < 0 {
		return 0, 0, false
	}

	var startByte, endByte uint64

	if start+1 == end {
		// Suffix form "bytes=-N": the last N bytes of the file.
		retByte, err := strconv.ParseUint(byteRanges[end+1:], 10, 64)
		if err != nil || retByte == 0 {
			return 0, 0, false
		}
		// Fix (RFC 7233): a suffix longer than the file selects the entire
		// representation.  Previously fileSize-retByte underflowed and the
		// range was wrongly rejected as unsatisfiable.
		if retByte > fileSize {
			retByte = fileSize
		}
		startByte = fileSize - retByte
		endByte = fileSize - 1
	} else if end+1 == len(byteRanges) {
		// Open-ended form "bytes=M-": from M through the last byte.
		firstByte, err := strconv.ParseUint(byteRanges[start+1:end], 10, 64)
		if err != nil {
			return 0, 0, false
		}

		startByte = firstByte
		endByte = fileSize - 1
	} else {
		// Full form "bytes=M-N"; N is clamped to the last byte.
		firstByte, err := strconv.ParseUint(byteRanges[start+1:end], 10, 64)
		if err != nil {
			return 0, 0, false
		}
		lastByte, err := strconv.ParseUint(byteRanges[end+1:], 10, 64)
		if err != nil {
			return 0, 0, false
		}

		if lastByte > fileSize-1 {
			lastByte = fileSize - 1
		}

		startByte = firstByte
		endByte = lastByte
	}

	// Inverted/empty ranges (including a start beyond EOF) are unsatisfiable.
	if startByte > endByte {
		return 0, 0, false
	}

	return startByte, endByte, true
}
fmt.Sprintf(\"attachment;filename*=utf-8''%s;filename=\\\"%s\\\"\", url.PathEscape(fileName), fileName)\n\t} else {\n\t\tcontFileName = fmt.Sprintf(\"inline;filename*=utf-8''%s;filename=\\\"%s\\\"\", url.PathEscape(fileName), fileName)\n\t}\n\trsp.Header().Set(\"Content-Disposition\", contFileName)\n\n\tif fileType != \"image/jpg\" {\n\t\trsp.Header().Set(\"X-Content-Type-Options\", \"nosniff\")\n\t}\n}\n\nfunc accessBlksCB(rsp http.ResponseWriter, r *http.Request) *appError {\n\tparts := strings.Split(r.URL.Path[1:], \"/\")\n\tif len(parts) < 3 {\n\t\tmsg := \"Invalid URL\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\ttoken := parts[1]\n\tblkID := parts[2]\n\taccessInfo, err := parseWebaccessInfo(token)\n\tif err != nil {\n\t\treturn err\n\t}\n\trepoID := accessInfo.repoID\n\top := accessInfo.op\n\tuser := accessInfo.user\n\tid := accessInfo.objID\n\n\tif _, ok := r.Header[\"If-Modified-Since\"]; ok {\n\t\treturn &appError{nil, \"\", http.StatusNotModified}\n\t}\n\n\tnow := time.Now()\n\trsp.Header().Set(\"Last-Modified\", now.Format(\"Mon, 2 Jan 2006 15:04:05 GMT\"))\n\trsp.Header().Set(\"Cache-Control\", \"max-age=3600\")\n\n\trepo := repomgr.Get(repoID)\n\tif repo == nil {\n\t\tmsg := \"Bad repo id\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\n\texists, _ := fsmgr.Exists(repo.StoreID, id)\n\tif !exists {\n\t\tmsg := \"Invalid file id\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\n\tif op != \"downloadblks\" {\n\t\tmsg := \"Operation does not match access token\"\n\t\treturn &appError{nil, msg, http.StatusForbidden}\n\t}\n\n\tif err := doBlock(rsp, r, repo, id, user, blkID); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc doBlock(rsp http.ResponseWriter, r *http.Request, repo *repomgr.Repo, fileID string,\n\tuser string, blkID string) *appError {\n\tfile, err := fsmgr.GetSeafile(repo.StoreID, fileID)\n\tif err != nil {\n\t\tmsg := \"Failed to get seafile\"\n\t\treturn &appError{nil, msg, 
http.StatusBadRequest}\n\t}\n\n\tvar found bool\n\tfor _, id := range file.BlkIDs {\n\t\tif id == blkID {\n\t\t\tfound = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !found {\n\t\trsp.WriteHeader(http.StatusBadRequest)\n\t\treturn nil\n\t}\n\n\texists := blockmgr.Exists(repo.StoreID, blkID)\n\tif !exists {\n\t\trsp.WriteHeader(http.StatusBadRequest)\n\t\treturn nil\n\t}\n\n\trsp.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\tsetCommonHeaders(rsp, r, \"downloadblks\", blkID)\n\n\tsize, err := blockmgr.Stat(repo.StoreID, blkID)\n\tif err != nil {\n\t\tmsg := \"Failed to stat block\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\tif size == 0 {\n\t\trsp.WriteHeader(http.StatusOK)\n\t\treturn nil\n\t}\n\n\tfileSize := fmt.Sprintf(\"%d\", size)\n\trsp.Header().Set(\"Content-Length\", fileSize)\n\n\terr = blockmgr.Read(repo.StoreID, blkID, rsp)\n\tif err != nil {\n\t\tif !isNetworkErr(err) {\n\t\t\tlog.Errorf(\"failed to read block %s: %v\", blkID, err)\n\t\t}\n\t}\n\n\tsendStatisticMsg(repo.StoreID, user, \"web-file-download\", uint64(size))\n\n\treturn nil\n}\n\nfunc accessZipCB(rsp http.ResponseWriter, r *http.Request) *appError {\n\tparts := strings.Split(r.URL.Path[1:], \"/\")\n\tif len(parts) != 2 {\n\t\tmsg := \"Invalid URL\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\ttoken := parts[1]\n\n\taccessInfo, err := parseWebaccessInfo(token)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trepoID := accessInfo.repoID\n\top := accessInfo.op\n\tuser := accessInfo.user\n\tdata := accessInfo.objID\n\n\tif op != \"download-dir\" && op != \"download-dir-link\" &&\n\t\top != \"download-multi\" && op != \"download-multi-link\" {\n\t\tmsg := \"Operation does not match access token\"\n\t\treturn &appError{nil, msg, http.StatusForbidden}\n\t}\n\n\tif _, ok := r.Header[\"If-Modified-Since\"]; ok {\n\t\treturn &appError{nil, \"\", http.StatusNotModified}\n\t}\n\n\tnow := time.Now()\n\trsp.Header().Set(\"Last-Modified\", now.Format(\"Mon, 2 Jan 2006 
15:04:05 GMT\"))\n\trsp.Header().Set(\"Cache-Control\", \"max-age=3600\")\n\n\tif err := downloadZipFile(rsp, r, data, repoID, user, op); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc downloadZipFile(rsp http.ResponseWriter, r *http.Request, data, repoID, user, op string) *appError {\n\trepo := repomgr.Get(repoID)\n\tif repo == nil {\n\t\tmsg := \"Failed to get repo\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\n\tvar cryptKey *seafileCrypt\n\tif repo.IsEncrypted {\n\t\tkey, err := parseCryptKey(rsp, repoID, user, repo.EncVersion)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcryptKey = key\n\t}\n\n\tobj := make(map[string]interface{})\n\terr := json.Unmarshal([]byte(data), &obj)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to parse obj data for zip: %v\", err)\n\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t}\n\n\tar := zip.NewWriter(rsp)\n\tdefer ar.Close()\n\n\tif op == \"download-dir\" || op == \"download-dir-link\" {\n\t\tdirName, ok := obj[\"dir_name\"].(string)\n\t\tif !ok || dirName == \"\" {\n\t\t\terr := fmt.Errorf(\"invalid download dir data: miss dir_name field\")\n\t\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t\t}\n\n\t\tobjID, ok := obj[\"obj_id\"].(string)\n\t\tif !ok || objID == \"\" {\n\t\t\terr := fmt.Errorf(\"invalid download dir data: miss obj_id field\")\n\t\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t\t}\n\n\t\tzipName := dirName + \".zip\"\n\t\tsetCommonHeaders(rsp, r, \"download\", zipName)\n\n\t\t// The zip name downloaded by safari will be garbled if we encode the zip name,\n\t\t// because we download zip file using chunk encoding.\n\t\tcontFileName := fmt.Sprintf(\"attachment;filename=\\\"%s\\\";filename*=utf-8''%s\", zipName, url.PathEscape(zipName))\n\t\trsp.Header().Set(\"Content-Disposition\", contFileName)\n\t\trsp.Header().Set(\"Content-Type\", \"application/octet-stream\")\n\n\t\terr := packDir(ar, repo, objID, dirName, 
cryptKey)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to pack dir %s: %v\", dirName, err)\n\t\t\treturn nil\n\t\t}\n\t} else {\n\t\tdirList, err := parseDirFilelist(repo, obj)\n\t\tif err != nil {\n\t\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t\t}\n\n\t\tnow := time.Now()\n\t\tzipName := fmt.Sprintf(\"documents-export-%d-%d-%d.zip\", now.Year(), now.Month(), now.Day())\n\n\t\tsetCommonHeaders(rsp, r, \"download\", zipName)\n\t\tcontFileName := fmt.Sprintf(\"attachment;filename=\\\"%s\\\";filename*=utf8''%s\", zipName, url.PathEscape(zipName))\n\t\trsp.Header().Set(\"Content-Disposition\", contFileName)\n\t\trsp.Header().Set(\"Content-Type\", \"application/octet-stream\")\n\n\t\tfileList := []string{}\n\t\tfor _, v := range dirList {\n\t\t\tuniqueName := genUniqueFileName(v.Name, fileList)\n\t\t\tfileList = append(fileList, uniqueName)\n\t\t\tif fsmgr.IsDir(v.Mode) {\n\t\t\t\tif err := packDir(ar, repo, v.ID, uniqueName, cryptKey); err != nil {\n\t\t\t\t\tif !isNetworkErr(err) {\n\t\t\t\t\t\tlog.Errorf(\"failed to pack dir %s: %v\", v.Name, err)\n\t\t\t\t\t}\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif err := packFiles(ar, &v, repo, \"\", uniqueName, cryptKey); err != nil {\n\t\t\t\t\tif !isNetworkErr(err) {\n\t\t\t\t\t\tlog.Errorf(\"failed to pack file %s: %v\", v.Name, err)\n\t\t\t\t\t}\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc genUniqueFileName(fileName string, fileList []string) string {\n\tvar uniqueName string\n\tvar name string\n\ti := 1\n\tdot := strings.LastIndex(fileName, \".\")\n\tif dot < 0 {\n\t\tname = fileName\n\t} else {\n\t\tname = fileName[:dot]\n\t}\n\tuniqueName = fileName\n\n\tfor nameInFileList(uniqueName, fileList) {\n\t\tif dot < 0 {\n\t\t\tuniqueName = fmt.Sprintf(\"%s (%d)\", name, i)\n\t\t} else {\n\t\t\tuniqueName = fmt.Sprintf(\"%s (%d).%s\", name, i, fileName[dot+1:])\n\t\t}\n\t\ti++\n\t}\n\n\treturn uniqueName\n}\n\nfunc nameInFileList(fileName string, 
fileList []string) bool {\n\tfor _, name := range fileList {\n\t\tif name == fileName {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc parseDirFilelist(repo *repomgr.Repo, obj map[string]interface{}) ([]fsmgr.SeafDirent, error) {\n\tparentDir, ok := obj[\"parent_dir\"].(string)\n\tif !ok || parentDir == \"\" {\n\t\terr := fmt.Errorf(\"invalid download multi data, miss parent_dir field\")\n\t\treturn nil, err\n\t}\n\n\tdir, err := fsmgr.GetSeafdirByPath(repo.StoreID, repo.RootID, parentDir)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to get dir %s repo %s\", parentDir, repo.StoreID)\n\t\treturn nil, err\n\t}\n\n\tfileList, ok := obj[\"file_list\"].([]interface{})\n\tif !ok || fileList == nil {\n\t\terr := fmt.Errorf(\"invalid download multi data, miss file_list field\")\n\t\treturn nil, err\n\t}\n\n\tdirentHash := make(map[string]fsmgr.SeafDirent)\n\tfor _, v := range dir.Entries {\n\t\tdirentHash[v.Name] = *v\n\t}\n\n\tdirentList := make([]fsmgr.SeafDirent, 0)\n\n\tfor _, fileName := range fileList {\n\t\tname, ok := fileName.(string)\n\t\tif !ok {\n\t\t\terr := fmt.Errorf(\"invalid download multi data\")\n\t\t\treturn nil, err\n\t\t}\n\t\tif name == \"\" {\n\t\t\terr := fmt.Errorf(\"invalid download file name\")\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif strings.Contains(name, \"/\") {\n\t\t\trpath := filepath.Join(parentDir, name)\n\t\t\tdent, err := fsmgr.GetDirentByPath(repo.StoreID, repo.RootID, rpath)\n\t\t\tif err != nil {\n\t\t\t\terr := fmt.Errorf(\"failed to get path %s for repo %s: %v\", rpath, repo.StoreID, err)\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tdirentList = append(direntList, *dent)\n\t\t} else {\n\t\t\tv, ok := direntHash[name]\n\t\t\tif !ok {\n\t\t\t\terr := fmt.Errorf(\"invalid download multi data\")\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tdirentList = append(direntList, v)\n\t\t}\n\t}\n\n\treturn direntList, nil\n}\n\nfunc packDir(ar *zip.Writer, repo *repomgr.Repo, dirID, dirPath string, cryptKey *seafileCrypt) error 
{\n\tdirent, err := fsmgr.GetSeafdir(repo.StoreID, dirID)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to get dir for zip: %v\", err)\n\t\treturn err\n\t}\n\n\tif dirent.Entries == nil {\n\t\tfileDir := filepath.Join(dirPath)\n\t\tfileDir = strings.TrimLeft(fileDir, \"/\")\n\t\t_, err := ar.Create(fileDir + \"/\")\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"failed to create zip dir: %v\", err)\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tentries := dirent.Entries\n\n\tfor _, v := range entries {\n\t\tfileDir := filepath.Join(dirPath, v.Name)\n\t\tfileDir = strings.TrimLeft(fileDir, \"/\")\n\t\tif fsmgr.IsDir(v.Mode) {\n\t\t\tif err := packDir(ar, repo, v.ID, fileDir, cryptKey); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\tif err := packFiles(ar, v, repo, dirPath, v.Name, cryptKey); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc packFiles(ar *zip.Writer, dirent *fsmgr.SeafDirent, repo *repomgr.Repo, parentPath, baseName string, cryptKey *seafileCrypt) error {\n\tfile, err := fsmgr.GetSeafile(repo.StoreID, dirent.ID)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to get seafile : %v\", err)\n\t\treturn err\n\t}\n\n\tfilePath := filepath.Join(parentPath, baseName)\n\tfilePath = strings.TrimLeft(filePath, \"/\")\n\n\tfileHeader := new(zip.FileHeader)\n\tfileHeader.Name = filePath\n\tfileHeader.Modified = time.Unix(dirent.Mtime, 0)\n\tfileHeader.Method = zip.Deflate\n\tzipFile, err := ar.CreateHeader(fileHeader)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to create zip file : %v\", err)\n\t\treturn err\n\t}\n\n\tif cryptKey != nil {\n\t\tfor _, blkID := range file.BlkIDs {\n\t\t\tvar buf bytes.Buffer\n\t\t\tblockmgr.Read(repo.StoreID, blkID, &buf)\n\t\t\tdecoded, err := cryptKey.decrypt(buf.Bytes())\n\t\t\tif err != nil {\n\t\t\t\terr := fmt.Errorf(\"failed to decrypt block %s: %v\", blkID, err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t_, err = zipFile.Write(decoded)\n\t\t\tif err != nil 
{\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\n\tfor _, blkID := range file.BlkIDs {\n\t\terr := blockmgr.Read(repo.StoreID, blkID, zipFile)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\ntype recvData struct {\n\tparentDir   string\n\ttokenType   string\n\trepoID      string\n\tuser        string\n\trstart      int64\n\trend        int64\n\tfsize       int64\n\tfileNames   []string\n\tfiles       []string\n\tfileHeaders []*multipart.FileHeader\n}\n\nfunc uploadAPICB(rsp http.ResponseWriter, r *http.Request) *appError {\n\tif r.Method == \"OPTIONS\" {\n\t\tsetAccessControl(rsp)\n\t\trsp.WriteHeader(http.StatusOK)\n\t\treturn nil\n\t}\n\n\tfsm, err := parseUploadHeaders(r)\n\tif err != nil {\n\t\tformatJSONError(rsp, err)\n\t\treturn err\n\t}\n\n\tif err := doUpload(rsp, r, fsm, false); err != nil {\n\t\tformatJSONError(rsp, err)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc setAccessControl(rsp http.ResponseWriter) {\n\trsp.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\trsp.Header().Set(\"Access-Control-Allow-Headers\", \"x-requested-with, content-type, content-range, content-disposition, accept, origin, authorization\")\n\trsp.Header().Set(\"Access-Control-Allow-Methods\", \"GET, POST, PUT, PATCH, DELETE, OPTIONS\")\n\trsp.Header().Set(\"Access-Control-Max-Age\", \"86400\")\n}\n\nfunc uploadAjaxCB(rsp http.ResponseWriter, r *http.Request) *appError {\n\tif r.Method == \"OPTIONS\" {\n\t\tsetAccessControl(rsp)\n\t\trsp.WriteHeader(http.StatusOK)\n\t\treturn nil\n\t}\n\n\tfsm, err := parseUploadHeaders(r)\n\tif err != nil {\n\t\tformatJSONError(rsp, err)\n\t\treturn err\n\t}\n\n\tif err := doUpload(rsp, r, fsm, true); err != nil {\n\t\tformatJSONError(rsp, err)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc formatJSONError(rsp http.ResponseWriter, err *appError) {\n\tif err.Message != \"\" {\n\t\trsp.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\t\terr.Message = fmt.Sprintf(\"{\\\"error\\\": 
\\\"%s\\\"}\", err.Message)\n\t}\n}\n\nfunc normalizeUTF8Path(p string) string {\n\tnewPath := norm.NFC.Bytes([]byte(p))\n\treturn string(newPath)\n}\n\nfunc doUpload(rsp http.ResponseWriter, r *http.Request, fsm *recvData, isAjax bool) *appError {\n\tsetAccessControl(rsp)\n\n\tif err := r.ParseMultipartForm(1 << 20); err != nil {\n\t\treturn &appError{nil, \"\", http.StatusBadRequest}\n\t}\n\tdefer r.MultipartForm.RemoveAll()\n\n\trepoID := fsm.repoID\n\tuser := fsm.user\n\n\treplaceStr := r.FormValue(\"replace\")\n\tvar replaceExisted bool\n\tif replaceStr != \"\" {\n\t\treplace, err := strconv.ParseInt(replaceStr, 10, 64)\n\t\tif err != nil || (replace != 0 && replace != 1) {\n\t\t\tmsg := \"Invalid argument replace.\\n\"\n\t\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t\t}\n\t\tif replace == 1 {\n\t\t\treplaceExisted = true\n\t\t}\n\t}\n\n\tparentDir := normalizeUTF8Path(r.FormValue(\"parent_dir\"))\n\tif parentDir == \"\" {\n\t\tmsg := \"No parent_dir given.\\n\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\n\tlastModifyStr := normalizeUTF8Path(r.FormValue(\"last_modify\"))\n\tvar lastModify int64\n\tif lastModifyStr != \"\" {\n\t\tt, err := time.Parse(time.RFC3339, lastModifyStr)\n\t\tif err == nil {\n\t\t\tlastModify = t.Unix()\n\t\t}\n\t}\n\n\trelativePath := normalizeUTF8Path(r.FormValue(\"relative_path\"))\n\tif relativePath != \"\" {\n\t\tif relativePath[0] == '/' || relativePath[0] == '\\\\' {\n\t\t\tmsg := \"Invalid relative path\"\n\t\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t\t}\n\t}\n\n\tnewParentDir := filepath.Join(\"/\", parentDir, relativePath)\n\tdefer clearTmpFile(fsm, newParentDir)\n\n\tif fsm.rstart >= 0 {\n\t\tif parentDir[0] != '/' {\n\t\t\tmsg := \"Invalid parent dir\"\n\t\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t\t}\n\n\t\tformFiles := r.MultipartForm.File\n\t\tfiles, ok := formFiles[\"file\"]\n\t\tif !ok {\n\t\t\tmsg := \"No file in multipart form.\\n\"\n\t\t\treturn 
&appError{nil, msg, http.StatusBadRequest}\n\t\t}\n\n\t\tif len(files) > 1 {\n\t\t\tmsg := \"More files in one request\"\n\t\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t\t}\n\n\t\terr := writeBlockDataToTmpFile(r, fsm, formFiles, repoID, newParentDir)\n\t\tif err != nil {\n\t\t\tmsg := \"Internal error.\\n\"\n\t\t\terr := fmt.Errorf(\"failed to write block data to tmp file: %v\", err)\n\t\t\treturn &appError{err, msg, http.StatusInternalServerError}\n\t\t}\n\n\t\tif fsm.rend != fsm.fsize-1 {\n\t\t\trsp.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\t\t\tsuccess := \"{\\\"success\\\": true}\"\n\t\t\trsp.Write([]byte(success))\n\n\t\t\treturn nil\n\t\t}\n\t} else {\n\t\tformFiles := r.MultipartForm.File\n\t\tfileHeaders, ok := formFiles[\"file\"]\n\t\tif !ok {\n\t\t\tmsg := \"No file in multipart form.\\n\"\n\t\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t\t}\n\t\tfor _, handler := range fileHeaders {\n\t\t\tfileName := filepath.Base(handler.Filename)\n\t\t\tfsm.fileNames = append(fsm.fileNames, normalizeUTF8Path(fileName))\n\t\t\tfsm.fileHeaders = append(fsm.fileHeaders, handler)\n\t\t}\n\t}\n\n\tif fsm.fileNames == nil {\n\t\tmsg := \"No file uploaded.\\n\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\n\tif err := checkParentDir(repoID, parentDir); err != nil {\n\t\treturn err\n\t}\n\n\tif !isParentMatched(fsm.parentDir, parentDir) {\n\t\tmsg := \"Parent dir doesn't match.\"\n\t\treturn &appError{nil, msg, http.StatusForbidden}\n\t}\n\n\tif err := checkTmpFileList(fsm); err != nil {\n\t\treturn err\n\t}\n\n\tvar contentLen int64\n\tif fsm.fsize > 0 {\n\t\tcontentLen = fsm.fsize\n\t} else {\n\t\tlenstr := r.Header.Get(\"Content-Length\")\n\t\tif lenstr == \"\" {\n\t\t\tcontentLen = -1\n\t\t} else {\n\t\t\ttmpLen, err := strconv.ParseInt(lenstr, 10, 64)\n\t\t\tif err != nil {\n\t\t\t\tmsg := \"Internal error.\\n\"\n\t\t\t\terr := fmt.Errorf(\"failed to parse content len: %v\", err)\n\t\t\t\treturn 
&appError{err, msg, http.StatusInternalServerError}\n\t\t\t}\n\t\t\tcontentLen = tmpLen\n\t\t}\n\t}\n\n\tret, err := checkQuota(repoID, contentLen)\n\tif err != nil {\n\t\tmsg := \"Internal error.\\n\"\n\t\terr := fmt.Errorf(\"failed to check quota: %v\", err)\n\t\treturn &appError{err, msg, http.StatusInternalServerError}\n\t}\n\tif ret == 1 {\n\t\tmsg := \"Out of quota.\\n\"\n\t\treturn &appError{nil, msg, seafHTTPResNoQuota}\n\t}\n\n\tif err := createRelativePath(repoID, parentDir, relativePath, user); err != nil {\n\t\treturn err\n\t}\n\n\tif err := postMultiFiles(rsp, r, repoID, newParentDir, user, fsm,\n\t\treplaceExisted, lastModify, isAjax); err != nil {\n\t\treturn err\n\t}\n\n\toper := \"web-file-upload\"\n\tif fsm.tokenType == \"upload-link\" {\n\t\toper = \"link-file-upload\"\n\t}\n\n\tsendStatisticMsg(repoID, user, oper, uint64(contentLen))\n\n\treturn nil\n}\n\nfunc writeBlockDataToTmpFile(r *http.Request, fsm *recvData, formFiles map[string][]*multipart.FileHeader,\n\trepoID, parentDir string) error {\n\thttpTempDir := filepath.Join(absDataDir, \"httptemp\")\n\n\tfileHeaders, ok := formFiles[\"file\"]\n\tif !ok {\n\t\terr := fmt.Errorf(\"failed to get file from multipart form\")\n\t\treturn err\n\t}\n\n\tfilename, err := getFileNameFromMimeHeader(r)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to get filename from mime header: %w\", err)\n\t}\n\n\thandler := fileHeaders[0]\n\tfile, err := handler.Open()\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to open file for read: %v\", err)\n\t\treturn err\n\t}\n\tdefer file.Close()\n\n\tvar f *os.File\n\tfilePath := filepath.Join(\"/\", parentDir, filename)\n\ttmpFile, err := repomgr.GetUploadTmpFile(repoID, filePath)\n\tif err != nil || tmpFile == \"\" {\n\t\ttmpDir := filepath.Join(httpTempDir, \"cluster-shared\")\n\t\tf, err = os.CreateTemp(tmpDir, filename)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\trepomgr.AddUploadTmpFile(repoID, filePath, f.Name())\n\t\ttmpFile = f.Name()\n\t} else 
{
		f, err = os.OpenFile(tmpFile, os.O_WRONLY|os.O_CREATE, 0666)
		if err != nil {
			return err
		}
	}

	// Only record the file name/path once the final chunk of a ranged
	// upload has arrived (rend is the inclusive last byte index).
	if fsm.rend == fsm.fsize-1 {
		fsm.fileNames = append(fsm.fileNames, filepath.Base(filename))
		fsm.files = append(fsm.files, tmpFile)
	}

	f.Seek(fsm.rstart, 0)
	io.Copy(f, file)
	f.Close()

	return nil
}

// getFileNameFromMimeHeader extracts and URL-unescapes the "filename"
// parameter from the request's Content-Disposition header, returning the
// UTF-8 normalized result.
func getFileNameFromMimeHeader(r *http.Request) (string, error) {
	disposition := r.Header.Get("Content-Disposition")
	if disposition == "" {
		err := fmt.Errorf("missing content disposition")
		return "", err
	}

	_, params, err := mime.ParseMediaType(disposition)
	if err != nil {
		err := fmt.Errorf("failed to parse Content-Disposition: %v", err)
		return "", err
	}
	filename, err := url.QueryUnescape(params["filename"])
	if err != nil {
		err := fmt.Errorf("failed to get filename: %v", err)
		return "", err
	}

	return normalizeUTF8Path(filename), nil
}

// createRelativePath creates relativePath (and any missing parents) under
// parentDir in the repo. An empty relativePath is a no-op.
func createRelativePath(repoID, parentDir, relativePath, user string) *appError {
	if relativePath == "" {
		return nil
	}

	err := mkdirWithParents(repoID, parentDir, relativePath, user)
	if err != nil {
		msg := "Internal error.\n"
		err := fmt.Errorf("Failed to create parent directory: %v", err)
		return &appError{err, msg, http.StatusInternalServerError}
	}

	return nil
}

// mkdirWithParents creates newDirPath under parentDir, creating intermediate
// directories as needed, then commits the change and schedules a virtual-repo
// merge. Directory names are validated with shouldIgnoreFile first.
func mkdirWithParents(repoID, parentDir, newDirPath, user string) error {
	repo := repomgr.Get(repoID)
	if repo == nil {
		err := fmt.Errorf("failed to get repo %s", repoID)
		return err
	}

	headCommit, err := commitmgr.Load(repo.ID, repo.HeadCommitID)
	if err != nil {
		err := fmt.Errorf("failed to get head commit for repo %s", repo.ID)
		return err
	}

	relativeDirCan := getCanonPath(newDirPath)

	subFolders := strings.Split(relativeDirCan, "/")

	for _, name := range subFolders {
		if name == "" {
			continue
		}
		if shouldIgnoreFile(name) {
			err := fmt.Errorf("invalid dir name %s", name)
			return err
		}
	}

	var rootID string
	var parentDirCan string
	if parentDir == "/" || parentDir == "\\" {
		parentDirCan = "/"
	} else {
		parentDirCan = getCanonPath(parentDir)
	}

	absPath, dirID, err := checkAndCreateDir(repo, headCommit.RootID, parentDirCan, subFolders)
	if err != nil {
		err := fmt.Errorf("failed to check and create dir: %v", err)
		return err
	}
	// Empty absPath means every component already exists; nothing to do.
	if absPath == "" {
		return nil
	}
	newRootID := headCommit.RootID
	mtime := time.Now().Unix()
	mode := (syscall.S_IFDIR | 0644)
	dent := fsmgr.NewDirent(dirID, filepath.Base(absPath), uint32(mode), mtime, "", 0)

	var names []string
	rootID, _ = doPostMultiFiles(repo, newRootID, filepath.Dir(absPath), []*fsmgr.SeafDirent{dent}, user, false, &names)
	if rootID == "" {
		err := fmt.Errorf("failed to put dir")
		return err
	}

	buf := fmt.Sprintf("Added directory \"%s\"", relativeDirCan)
	_, err = genNewCommit(repo, headCommit, rootID, user, buf, true, "", false)
	if err != nil {
		err := fmt.Errorf("failed to generate new commit: %v", err)
		return err
	}

	go mergeVirtualRepoPool.AddTask(repo.ID, "")

	return nil
}

// checkAndCreateDir walks subFolders below parentDir, finds the first missing
// component, and builds the missing suffix via genDirRecursive. It returns
// the absolute path of the first missing directory and the fs object ID of
// its (possibly empty) subtree; both are empty when nothing is missing.
func checkAndCreateDir(repo *repomgr.Repo, rootID, parentDir string, subFolders []string) (string, string, error) {
	storeID := repo.StoreID
	dir, err := fsmgr.GetSeafdirByPath(storeID, rootID, parentDir)
	if err != nil {
		err := fmt.Errorf("parent_dir %s doesn't exist in repo %s", parentDir, storeID)
		return "", "", err
	}

	entries := dir.Entries
	var exists bool
	var absPath string
	var dirList []string
	for i, dirName := range subFolders {
		for _, de := range entries {
			if de.Name == dirName {
				exists = true
				subDir, err := fsmgr.GetSeafdir(storeID, de.ID)
				if err != nil {
					err := fmt.Errorf("failed to get seaf dir: %v", err)
					return "", "", err
				}
				entries = subDir.Entries
				break
			}
		}

		if !exists {
			relativePath := filepath.Join(subFolders[:i+1]...)
			absPath = filepath.Join(parentDir, relativePath)
			dirList = subFolders[i:]
			break
		}
		exists = false
	}
	// The first missing component itself is created by the caller; only the
	// components below it need a pre-built subtree.
	if dirList != nil {
		dirList = dirList[1:]
	}
	if len(dirList) == 0 {
		return absPath, "", nil
	}

	dirID, err := genDirRecursive(repo, dirList)
	if err != nil {
		err := fmt.Errorf("failed to generate dir recursive: %v", err)
		return "", "", err
	}

	return absPath, dirID, nil
}

// genDirRecursive builds a chain of nested empty directories named by toPath
// (outermost first), saving each seafdir object, and returns the object ID of
// the outermost directory.
func genDirRecursive(repo *repomgr.Repo, toPath []string) (string, error) {
	if len(toPath) == 1 {
		uniqueName := toPath[0]
		mode := (syscall.S_IFDIR | 0644)
		mtime := time.Now().Unix()
		dent := fsmgr.NewDirent("", uniqueName, uint32(mode), mtime, "", 0)
		newdir, err := fsmgr.NewSeafdir(1, []*fsmgr.SeafDirent{dent})
		if err != nil {
			err := fmt.Errorf("failed to new seafdir: %v", err)
			return "", err
		}
		err = fsmgr.SaveSeafdir(repo.StoreID, newdir)
		if err != nil {
			err := fmt.Errorf("failed to save seafdir %s/%s", repo.ID, newdir.DirID)
			return "", err
		}

		return newdir.DirID, nil
	}

	ret, err := genDirRecursive(repo, toPath[1:])
	if err != nil {
		err := fmt.Errorf("failed to generate dir recursive: %v", err)
		return "", err
	}
	if ret != "" {
		uniqueName := toPath[0]
		mode := (syscall.S_IFDIR | 0644)
		mtime := time.Now().Unix()
		dent := fsmgr.NewDirent(ret, uniqueName, uint32(mode), mtime, "", 0)
		newdir, err := fsmgr.NewSeafdir(1, []*fsmgr.SeafDirent{dent})
		if err != nil {
			err := fmt.Errorf("failed to new seafdir: %v", err)
			return "", err
		}
		err = fsmgr.SaveSeafdir(repo.StoreID, newdir)
		if err != nil {
			err := fmt.Errorf("failed to save seafdir %s/%s", repo.ID, newdir.DirID)
			return "", err
		}
		ret = newdir.DirID
	}

	return ret, nil
}

// clearTmpFile removes the temp file of a completed ranged upload and drops
// its bookkeeping record.
func clearTmpFile(fsm *recvData, parentDir string) {
	if fsm.rstart >= 0 && fsm.rend == fsm.fsize-1 {
		filePath := filepath.Join("/", parentDir, fsm.fileNames[0])
		tmpFile, err := repomgr.GetUploadTmpFile(fsm.repoID, filePath)
		if err == nil && tmpFile != "" {
			os.Remove(tmpFile)
		}
		repomgr.DelUploadTmpFile(fsm.repoID, filePath)
	}
}

// parseUploadHeaders validates the upload/update URL, access token, repo
// status, Content-Range and Content-Length headers, and builds the recvData
// state used by the upload handlers. Quota and max-size checks are performed
// up front using the declared content length.
func parseUploadHeaders(r *http.Request) (*recvData, *appError) {
	tokenLen := 36
	parts := strings.Split(r.URL.Path[1:], "/")
	if len(parts) < 2 {
		msg := "Invalid URL"
		return nil, &appError{nil, msg, http.StatusBadRequest}
	}
	urlOp := parts[0]
	if len(parts[1]) < tokenLen {
		msg := "Invalid URL"
		return nil, &appError{nil, msg, http.StatusBadRequest}
	}
	token := parts[1][:tokenLen]

	accessInfo, appErr := parseWebaccessInfo(token)
	if appErr != nil {
		return nil, appErr
	}

	repoID := accessInfo.repoID
	op := accessInfo.op
	user := accessInfo.user
	id := accessInfo.objID

	status, err := repomgr.GetRepoStatus(repoID)
	if err != nil {
		return nil, &appError{err, "", http.StatusInternalServerError}
	}
	if status != repomgr.RepoStatusNormal && status != -1 {
		msg := "Repo status not writable."
		return nil, &appError{nil, msg, http.StatusBadRequest}
	}

	if op == "upload-link" {
		op = "upload"
	}
	// The URL operation must start with the token's operation
	// (e.g. "upload-api" matches an "upload" token).
	if !strings.HasPrefix(urlOp, op) {
		msg := "Operation does not match access token."
		return nil, &appError{nil, msg, http.StatusForbidden}
	}

	fsm := new(recvData)

	if op != "update" {
		obj := make(map[string]interface{})
		if err := json.Unmarshal([]byte(id), &obj); err != nil {
			err := fmt.Errorf("failed to decode obj data : %v", err)
			return nil, &appError{err, "", http.StatusInternalServerError}
		}

		parentDir, ok := obj["parent_dir"].(string)
		if !ok || parentDir == "" {
			err := fmt.Errorf("no parent_dir in access token")
			return nil, &appError{err, "", http.StatusInternalServerError}
		}
		fsm.parentDir = parentDir
	}

	fsm.tokenType = accessInfo.op
	fsm.repoID = repoID
	fsm.user = user
	fsm.rstart = -1
	fsm.rend = -1
	fsm.fsize = -1

	ranges := r.Header.Get("Content-Range")
	if ranges != "" {
		parseContentRange(ranges, fsm)
	}

	var contentLen int64
	lenstr := r.Header.Get("Content-Length")
	if lenstr != "" {
		conLen, _ := strconv.ParseInt(lenstr, 10, 64)
		contentLen = conLen
		if contentLen < 0 {
			contentLen = 0
		}
		// For ranged uploads, quota is checked against the whole file size,
		// not the size of this chunk.
		if fsm.fsize > 0 {
			contentLen = fsm.fsize
		}
	}
	if err := checkQuotaByContentLength(r, repoID, contentLen); err != nil {
		return nil, err
	}
	if err := checkFileSizeByContentLength(r, contentLen); err != nil {
		return nil, err
	}

	return fsm, nil
}

// Check whether the file to be uploaded would exceed the quota before receiving the body, in order to avoid unnecessarily receiving the body.
// After receiving the body, the quota is checked again to handle cases where the Content-Length in the request header is missing, which could make the initial quota check inaccurate.
func checkQuotaByContentLength(r *http.Request, repoID string, contentLen int64) *appError {
	if r.Method != "PUT" && r.Method != "POST" {
		return nil
	}

	ret, err := checkQuota(repoID, contentLen)
	if err != nil {
		msg := "Internal error.\n"
		err := fmt.Errorf("failed to check quota: %v", err)
		return &appError{err, msg, http.StatusInternalServerError}
	}
	if ret == 1 {
		msg := "Out of quota.\n"
		return &appError{nil, msg, seafHTTPResNoQuota}
	}

	return nil
}

// checkFileSizeByContentLength rejects PUT/POST bodies whose declared length
// exceeds the configured MaxUploadSize (0 disables the limit).
func checkFileSizeByContentLength(r *http.Request, contentLen int64) *appError {
	if r.Method != "PUT" && r.Method != "POST" {
		return nil
	}

	if option.MaxUploadSize > 0 && uint64(contentLen) > option.MaxUploadSize {
		msg := "File size is too large.\n"
		return &appError{nil, msg, seafHTTPResTooLarge}
	}

	return nil
}

// postMultiFiles indexes the uploaded files into blocks (from temp files for
// ranged uploads, otherwise from the multipart headers), commits them under
// parentDir, and writes the result (JSON or tab-separated IDs) to rsp.
func postMultiFiles(rsp http.ResponseWriter, r *http.Request, repoID, parentDir, user string, fsm *recvData, replace bool, lastModify int64, isAjax bool) *appError {

	fileNames := fsm.fileNames
	files := fsm.files
	repo := repomgr.Get(repoID)
	if repo == nil {
		msg := "Failed to get repo.\n"
		err := fmt.Errorf("Failed to get repo %s", repoID)
		return &appError{err, msg, http.StatusInternalServerError}
	}

	canonPath := getCanonPath(parentDir)

	if !replace && checkFilesWithSameName(repo, canonPath, fileNames) {
		msg := "Too many files with same name.\n"
		return &appError{nil, msg, http.StatusBadRequest}
	}

	for _, fileName := range fileNames {
		if shouldIgnoreFile(fileName) {
			msg := fmt.Sprintf("invalid fileName: %s.\n", fileName)
			return &appError{nil, msg, http.StatusBadRequest}
		}
	}
	if strings.Contains(parentDir, "//") {
		msg := "parent_dir contains // sequence.\n"
		return &appError{nil, msg, http.StatusBadRequest}
	}

	var cryptKey *seafileCrypt
	if repo.IsEncrypted {
		key, err := parseCryptKey(rsp, repoID, user, repo.EncVersion)
		if err != nil {
			return err
		}
		cryptKey = key
	}

	// Snapshot the GC id before indexing so a concurrent GC can be detected
	// when the commit is made.
	gcID, err := repomgr.GetCurrentGCID(repo.StoreID)
	if err != nil {
		err := fmt.Errorf("failed to get current gc id for repo %s: %v", repoID, err)
		return &appError{err, "", http.StatusInternalServerError}
	}

	var ids []string
	var sizes []int64
	if fsm.rstart >= 0 {
		for _, filePath := range files {
			id, size, err := indexBlocks(r.Context(), repo.StoreID, repo.Version, filePath, nil, cryptKey)
			if err != nil {
				if !errors.Is(err, context.Canceled) {
					err := fmt.Errorf("failed to index blocks: %v", err)
					return &appError{err, "", http.StatusInternalServerError}
				}
				return &appError{nil, "", http.StatusInternalServerError}
			}
			ids = append(ids, id)
			sizes = append(sizes, size)
		}
	} else {
		for _, handler := range fsm.fileHeaders {
			id, size, err := indexBlocks(r.Context(), repo.StoreID, repo.Version, "", handler, cryptKey)
			if err != nil {
				if !errors.Is(err, context.Canceled) {
					err := fmt.Errorf("failed to index blocks: %v", err)
					return &appError{err, "", http.StatusInternalServerError}
				}
				return &appError{nil, "", http.StatusInternalServerError}
			}
			ids = append(ids, id)
			sizes = append(sizes, size)
		}
	}

	retStr, err := postFilesAndGenCommit(fileNames, repo.ID, user, canonPath, replace, ids, sizes, lastModify, gcID)
	if err != nil {
		if errors.Is(err, ErrGCConflict) {
			return &appError{nil, "GC Conflict.\n", http.StatusConflict}
		} else {
			err := fmt.Errorf("failed to post files and gen commit: %v", err)
			return &appError{err, "", http.StatusInternalServerError}
		}
	}

	_, ok := r.Form["ret-json"]
	if ok || isAjax {
		rsp.Header().Set("Content-Type", "application/json; charset=utf-8")
		rsp.Write([]byte(retStr))
	} else {
		var array []map[string]interface{}
		err := json.Unmarshal([]byte(retStr), &array)
		if err != nil {
			msg := "Internal error.\n"
			err := fmt.Errorf("failed to decode data to json: %v", err)
			return &appError{err, msg, http.StatusInternalServerError}
		}

		var ids []string
		for _, v := range array {
			id, ok := v["id"].(string)
			if !ok {
				msg := "Internal error.\n"
				err := fmt.Errorf("failed to assert")
				return &appError{err, msg, http.StatusInternalServerError}
			}
			ids = append(ids, id)
		}
		newIDs := strings.Join(ids, "\t")
		rsp.Write([]byte(newIDs))
	}

	return nil
}

// checkFilesWithSameName reports whether any of fileNames cannot be given a
// unique "(n)" suffixed name in canonPath (i.e. too many duplicates exist).
func checkFilesWithSameName(repo *repomgr.Repo, canonPath string, fileNames []string) bool {
	commit, err := commitmgr.Load(repo.ID, repo.HeadCommitID)
	if err != nil {
		return false
	}
	dir, err := fsmgr.GetSeafdirByPath(repo.StoreID, commit.RootID, canonPath)
	if err != nil {
		return false
	}

	for _, name := range fileNames {
		uniqueName := genUniqueName(name, dir.Entries)
		if uniqueName == "" {
			return true
		}
	}

	return false
}

// postFilesAndGenCommit adds the indexed files as dirents under canonPath and
// creates a commit, retrying with a fresh head commit on concurrent-update
// conflicts. It returns the JSON array of {name,id,size} for the added files.
func postFilesAndGenCommit(fileNames []string, repoID string, user, canonPath string, replace bool, ids []string, sizes []int64, lastModify int64, lastGCID string) (string, error) {
	handleConncurrentUpdate := true
	if !replace {
		handleConncurrentUpdate = false
	}
	repo := repomgr.Get(repoID)
	if repo == nil {
		err := fmt.Errorf("failed to get repo %s", repoID)
		return "", err
	}
	headCommit, err := commitmgr.Load(repo.ID, repo.HeadCommitID)
	if err != nil {
		err := fmt.Errorf("failed to get head commit for repo %s", repo.ID)
		return "", err
	}
	var names []string
	var retryCnt int

	var dents []*fsmgr.SeafDirent
	for i, name := range fileNames {
		if i > len(ids)-1 || i > len(sizes)-1 {
			break
		}
		mode := (syscall.S_IFREG | 0644)
		mtime := lastModify
		if mtime <= 0 {
			mtime = time.Now().Unix()
		}
		dent := fsmgr.NewDirent(ids[i], name, uint32(mode), mtime, "", sizes[i])
		dents = append(dents, dent)
	}

retry:
	rootID, err := doPostMultiFiles(repo, headCommit.RootID, canonPath, dents, user, replace, &names)
	if err != nil {
		err := fmt.Errorf("failed to post files to %s in repo %s", canonPath, repo.ID)
		return "", err
	}

	var buf string
	if len(fileNames) > 1 {
		buf = fmt.Sprintf("Added \"%s\" and %d more files.", fileNames[0], len(fileNames)-1)
	} else {
		buf = fmt.Sprintf("Added \"%s\".", fileNames[0])
	}

	_, err = genNewCommit(repo, headCommit, rootID, user, buf, handleConncurrentUpdate, lastGCID, true)
	if err != nil {
		if err != ErrConflict {
			err := fmt.Errorf("failed to generate new commit: %w", err)
			return "", err
		}
		retryCnt++
		/* Sleep random time between 0 and 3 seconds. */
		random := rand.Intn(30) + 1
		log.Debugf("concurrent upload retry :%d", retryCnt)
		time.Sleep(time.Duration(random*100) * time.Millisecond)
		repo = repomgr.Get(repoID)
		if repo == nil {
			err := fmt.Errorf("failed to get repo %s", repoID)
			return "", err
		}
		headCommit, err = commitmgr.Load(repo.ID, repo.HeadCommitID)
		if err != nil {
			err := fmt.Errorf("failed to get head commit for repo %s", repo.ID)
			return "", err
		}
		goto retry
	}

	go mergeVirtualRepoPool.AddTask(repo.ID, "")

	retJSON, err := formatJSONRet(names, ids, sizes)
	if err != nil {
		err := fmt.Errorf("failed to format json data")
		return "", err
	}

	return string(retJSON), nil
}

// formatJSONRet marshals parallel name/id/size slices into a JSON array of
// objects, truncating at the shortest slice.
func formatJSONRet(nameList, idList []string, sizeList []int64) ([]byte, error) {
	var array []map[string]interface{}
	for i := range nameList {
		if i >= len(idList) || i >= len(sizeList) {
			break
		}
		obj := make(map[string]interface{})
		obj["name"] = nameList[i]
		obj["id"] = idList[i]
		obj["size"] = sizeList[i]
		array = append(array, obj)
	}

	jsonstr, err := json.Marshal(array)
	if err != nil {
		err := fmt.Errorf("failed to convert array to json")
		return nil, err
	}

	return jsonstr, nil
}

// getCanonPath converts backslashes to slashes and cleans the path.
func getCanonPath(p string) string {
	formatPath := strings.Replace(p, "\\", "/", -1)
	return filepath.Join(formatPath)
}

var (
	ErrConflict   = errors.New("Concurent upload conflict")
	ErrGCConflict = errors.New("GC Conflict")
)

// genNewCommit creates and saves a commit on top of base with root newRoot,
// then advances the branch head, retrying (with merge) up to 10 times on
// concurrent updates when handleConncurrentUpdate is set.
func genNewCommit(repo *repomgr.Repo, base *commitmgr.Commit, newRoot, user, desc string, handleConncurrentUpdate bool, lastGCID string, checkGC bool) (string, error) {
	var retryCnt int
	repoID := repo.ID
	commit := commitmgr.NewCommit(repoID, base.CommitID, newRoot, user, desc)
	repomgr.RepoToCommit(repo, commit)
	err := commitmgr.Save(commit)
	if err != nil {
		err := fmt.Errorf("failed to add commit: %v", err)
		return "", err
	}
	var commitID string

	maxRetryCnt := 10

	for {
		retry, err := genCommitNeedRetry(repo, base, commit, newRoot, user, handleConncurrentUpdate, &commitID, lastGCID, checkGC)
		if err != nil {
			return "", err
		}
		if !retry {
			break
		}
		if !handleConncurrentUpdate {
			return "", ErrConflict
		}

		if retryCnt < maxRetryCnt {
			/* Sleep random time between 0 and 3 seconds. */
			random := rand.Intn(30) + 1
			time.Sleep(time.Duration(random*100) * time.Millisecond)
			repo = repomgr.Get(repoID)
			if repo == nil {
				err := fmt.Errorf("repo %s doesn't exist", repoID)
				return "", err
			}
			retryCnt++
		} else {
			err := fmt.Errorf("stop updating repo %s after %d retries", repoID, maxRetryCnt)
			return "", err
		}
	}

	return commitID, nil
}

// fastForwardOrMerge advances the branch to newCommit, merging with the
// current head when it has moved, retrying up to 3 times. GC conflict
// checking is enabled when a last-GC id was recorded for the sync token.
func fastForwardOrMerge(user, token string, repo *repomgr.Repo, base, newCommit *commitmgr.Commit) error {
	var retryCnt int
	checkGC, err := repomgr.HasLastGCID(repo.ID, token)
	if err != nil {
		return err
	}
	var lastGCID string
	if checkGC {
		lastGCID, _ = repomgr.GetLastGCID(repo.ID, token)
		repomgr.RemoveLastGCID(repo.ID, token)
	}
	for {
		retry, err := genCommitNeedRetry(repo, base, newCommit, newCommit.RootID, user, true, nil, lastGCID, checkGC)
		if err != nil {
			return err
		}
		if !retry {
			break
		}

		if retryCnt < 3 {
			random := rand.Intn(10) + 1
			time.Sleep(time.Duration(random*100) * time.Millisecond)
			retryCnt++
		} else {
			err = fmt.Errorf("stop updating repo %s after 3 retries", repo.ID)
			return err
		}
	}
	return nil
}

// genCommitNeedRetry tries to move the branch head to commit. If the head has
// moved since base, it three-way merges (base/head/newRoot) into a new merge
// commit first. Returns retry=true when the branch update lost a race and the
// caller should reload the head and try again.
func genCommitNeedRetry(repo *repomgr.Repo, base *commitmgr.Commit, commit *commitmgr.Commit, newRoot, user string, handleConncurrentUpdate bool, commitID *string, lastGCID string, checkGC bool) (bool, error) {
	var secondParentID string
	repoID := repo.ID
	var mergeDesc string
	var mergedCommit *commitmgr.Commit
	currentHead, err := commitmgr.Load(repo.ID, repo.HeadCommitID)
	if err != nil {
		err := fmt.Errorf("failed to get head commit for repo %s", repoID)
		return false, err
	}

	if base.CommitID != currentHead.CommitID {
		if !handleConncurrentUpdate {
			return false, ErrConflict
		}
		roots := []string{base.RootID, currentHead.RootID, newRoot}
		opt := new(mergeOptions)
		opt.remoteRepoID = repoID
		opt.remoteHead = commit.CommitID

		err := mergeTrees(repo.StoreID, roots, opt)
		if err != nil {
			err := fmt.Errorf("failed to merge")
			return false, err
		}

		if !opt.conflict {
			mergeDesc = "Auto merge by system"
		} else {
			mergeDesc = genMergeDesc(repo, opt.mergedRoot, currentHead.RootID, newRoot)
			if mergeDesc == "" {
				mergeDesc = "Auto merge by system"
			}
		}

		secondParentID = commit.CommitID
		mergedCommit = commitmgr.NewCommit(repoID, currentHead.CommitID, opt.mergedRoot, user, mergeDesc)
		repomgr.RepoToCommit(repo, mergedCommit)
		mergedCommit.SecondParentID.SetValid(commit.CommitID)
		mergedCommit.NewMerge = 1
		if opt.conflict {
			mergedCommit.Conflict = 1
		}

		err = commitmgr.Save(mergedCommit)
		if err != nil {
			err := fmt.Errorf("failed to add commit: %v", err)
			return false, err
		}
	} else {
		mergedCommit = commit
	}

	gcConflict, err := updateBranch(repoID, repo.StoreID, mergedCommit.CommitID, currentHead.CommitID, secondParentID, checkGC, lastGCID)
	if gcConflict {
		return false, err
	}
	// A non-GC branch-update failure means we lost a race; signal retry.
	if err != nil {
		return true, nil
	}

	if commitID != nil {
		*commitID = mergedCommit.CommitID
	}
	return false, nil
}

// genMergeDesc produces a human-readable description of the differences a
// merge introduced, or "" on failure.
func genMergeDesc(repo *repomgr.Repo, mergedRoot, p1Root, p2Root string) string {
	var results []*diff.DiffEntry
	err := diff.DiffMergeRoots(repo.StoreID, mergedRoot, p1Root, p2Root, &results, true)
	if err != nil {
		return ""
	}

	desc := diff.DiffResultsToDesc(results)

	return desc
}

// updateBranch atomically advances the master branch from oldCommitID to
// newCommitID inside a transaction, optionally verifying that no GC ran
// since lastGCID (gcConflict=true with ErrGCConflict when it did). On
// success it fires branch-updated notifications.
func updateBranch(repoID, originRepoID, newCommitID, oldCommitID, secondParentID string, checkGC bool, lastGCID string) (gcConflict bool, err error) {
	ctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)
	defer cancel()
	trans, err := seafileDB.BeginTx(ctx, nil)
	if err != nil {
		err := fmt.Errorf("failed to start transaction: %v", err)
		return false, err
	}

	var row *sql.Row
	var sqlStr string
	if checkGC {
		// Lock the GC id row so a GC cannot slip in between the check and
		// the branch update.
		sqlStr = "SELECT gc_id FROM GCID WHERE repo_id = ? FOR UPDATE"
		if originRepoID == "" {
			row = trans.QueryRowContext(ctx, sqlStr, repoID)
		} else {
			row = trans.QueryRowContext(ctx, sqlStr, originRepoID)
		}
		var gcID sql.NullString
		if err := row.Scan(&gcID); err != nil {
			if err != sql.ErrNoRows {
				trans.Rollback()
				return false, err
			}
		}

		if lastGCID != gcID.String {
			err = fmt.Errorf("Head branch update for repo %s conflicts with GC.", repoID)
			trans.Rollback()
			return true, ErrGCConflict
		}
	}

	var commitID string
	name := "master"
	sqlStr = "SELECT commit_id FROM Branch WHERE name = ? AND repo_id = ? FOR UPDATE"

	row = trans.QueryRowContext(ctx, sqlStr, name, repoID)
	if err := row.Scan(&commitID); err != nil {
		if err != sql.ErrNoRows {
			trans.Rollback()
			return false, err
		}
	}
	if oldCommitID != commitID {
		trans.Rollback()
		err := fmt.Errorf("head commit id has changed")
		return false, err
	}

	sqlStr = "UPDATE Branch SET commit_id = ? WHERE name = ? AND repo_id = ?"
	_, err = trans.ExecContext(ctx, sqlStr, newCommitID, name, repoID)
	if err != nil {
		trans.Rollback()
		return false, err
	}

	// A failed commit means the branch was NOT updated; report it instead of
	// silently proceeding to the update notifications.
	if err := trans.Commit(); err != nil {
		return false, err
	}

	if secondParentID != "" {
		if err := onBranchUpdated(repoID, secondParentID, false); err != nil {
			return false, err
		}
	}

	if err := onBranchUpdated(repoID, newCommitID, true); err != nil {
		return false, err
	}

	return false, nil
}

// onBranchUpdated propagates a branch-head change: optionally refreshes the
// cached repo info, notifies the notification server, and publishes an
// update event for non-virtual repos.
func onBranchUpdated(repoID string, commitID string, updateRepoInfo bool) error {
	if updateRepoInfo {
		if err := repomgr.UpdateRepoInfo(repoID, commitID); err != nil {
			return err
		}
	}

	if option.EnableNotification {
		notifRepoUpdate(repoID, commitID)
	}

	isVirtual, err := repomgr.IsVirtualRepo(repoID)
	if err != nil {
		return err
	}
	if isVirtual {
		return nil
	}
	publishUpdateEvent(repoID, commitID)
	return nil
}

type notifEvent struct {
	Type    string           `json:"type"`
	Content *repoUpdateEvent `json:"content"`
}
type repoUpdateEvent struct {
	RepoID   string `json:"repo_id"`
	CommitID string `json:"commit_id"`
}

// notifRepoUpdate POSTs a "repo-update" event to the notification server,
// authenticated with a short-lived (300s) JWT.
func notifRepoUpdate(repoID string, commitID string) error {
	content := new(repoUpdateEvent)
	content.RepoID = repoID
	content.CommitID = commitID
	event := new(notifEvent)
	event.Type = "repo-update"
	event.Content = content
	msg, err := json.Marshal(event)
	if err != nil {
		log.Errorf("failed to encode repo update event: %v", err)
		return err
	}

	url := fmt.Sprintf("%s/events", option.NotificationURL)
	exp := time.Now().Add(time.Second * 300).Unix()
	token, err := utils.GenNotifJWTToken(repoID, "", exp)
	if err != nil {
		log.Errorf("failed to generate jwt token: %v", err)
		return err
	}
	header := map[string][]string{
		"Authorization": {"Token " + token},
	}
	_, _, err = utils.HttpCommon("POST", url, header, bytes.NewReader(msg))
	if err != nil {
		log.Warnf("failed to send repo update event: %v", err)
		return err
	}

	return nil
}

// doPostMultiFiles inserts dents under parentDir (relative to rootID) and
// returns the new root object ID.
func doPostMultiFiles(repo *repomgr.Repo, rootID, parentDir string, dents []*fsmgr.SeafDirent, user string, replace bool, names *[]string) (string, error) {
	if parentDir[0] == '/' {
		parentDir = parentDir[1:]
	}

	id, err := postMultiFilesRecursive(repo, rootID, parentDir, user, dents, replace, names)
	if err != nil {
		err := fmt.Errorf("failed to post multi files: %v", err)
		return "", err
	}

	return id, nil
}

// postMultiFilesRecursive descends toPath one component at a time, rewrites
// the target directory with the new dents, and rebuilds each ancestor
// directory object on the way back up, returning the new dir object ID.
func postMultiFilesRecursive(repo *repomgr.Repo, dirID, toPath, user string, dents []*fsmgr.SeafDirent, replace bool, names *[]string) (string, error) {
	olddir, err := fsmgr.GetSeafdir(repo.StoreID, dirID)
	if err != nil {
		err := fmt.Errorf("failed to get dir")
		return "", err
	}

	var ret string

	if toPath == "" {
		err := addNewEntries(repo, user, &olddir.Entries, dents, replace, names)
		if err != nil {
			err := fmt.Errorf("failed to add new entries: %v", err)
			return "", err
		}
		newdir, err := fsmgr.NewSeafdir(1, olddir.Entries)
		if err != nil {
			err := fmt.Errorf("failed to new seafdir: %v", err)
			return "", err
		}
		err = fsmgr.SaveSeafdir(repo.StoreID, newdir)
		if err != nil {
			err := fmt.Errorf("failed to save seafdir %s/%s", repo.ID, newdir.DirID)
			return "", err
		}

		return newdir.DirID, nil
	}

	var remain string
	firstName := toPath
	if slash := strings.Index(toPath, "/"); slash >= 0 {
		remain = toPath[slash+1:]
		firstName = toPath[:slash]
	}

	entries := olddir.Entries
	for i, dent := range entries {
		if dent.Name != firstName {
			continue
		}

		id, err := postMultiFilesRecursive(repo, dent.ID, remain, user, dents, replace, names)
		if err != nil {
			err := fmt.Errorf("failed to post dirent %s: %v", dent.Name, err)
			return "", err
		}
		ret = id
		if id != "" {
			entries[i].ID = id
			entries[i].Mtime = time.Now().Unix()
		}
		break
	}

	if ret != "" {
		newdir, err := fsmgr.NewSeafdir(1, entries)
		if err != nil {
			err := fmt.Errorf("failed to new seafdir: %v", err)
			return "", err
		}
		err = fsmgr.SaveSeafdir(repo.StoreID, newdir)
		if err != nil {
			err := fmt.Errorf("failed to save seafdir %s/%s", repo.ID, newdir.DirID)
			return "", err
		}
		ret = newdir.DirID
	} else {
		// The ret will be an empty string when failed to find parent dir, an error should be returned in such case.
		err := fmt.Errorf("failed to find parent dir for %s", toPath)
		return "", err
	}

	return ret, nil
}

// addNewEntries merges newDents into oldDents (replacing same-named entries
// when replaceExisted, otherwise generating "(n)" suffixed unique names),
// records the final names in names, and keeps oldDents sorted.
func addNewEntries(repo *repomgr.Repo, user string, oldDents *[]*fsmgr.SeafDirent, newDents []*fsmgr.SeafDirent, replaceExisted bool, names *[]string) error {
	for _, dent := range newDents {
		var replace bool
		var uniqueName string
		if replaceExisted {
			for i, entry := range *oldDents {
				if entry.Name == dent.Name {
					replace = true
					*oldDents = append((*oldDents)[:i], (*oldDents)[i+1:]...)
					break
				}
			}
		}

		if replace {
			uniqueName = dent.Name
		} else {
			uniqueName = genUniqueName(dent.Name, *oldDents)
		}
		if uniqueName != "" {
			newDent := fsmgr.NewDirent(dent.ID, uniqueName, dent.Mode, dent.Mtime, user, dent.Size)
			*oldDents = append(*oldDents, newDent)
			*names = append(*names, uniqueName)
		} else {
			err := fmt.Errorf("failed to generate unique name for %s", dent.Name)
			return err
		}
	}

	sort.Sort(Dirents(*oldDents))

	return nil
}

// genUniqueName returns fileName, or "name (n).ext" for the smallest n that
// avoids a clash with entries; "" when duplicateNamesCount is exhausted.
func genUniqueName(fileName string, entries []*fsmgr.SeafDirent) string {
	var uniqueName string
	var name string
	i := 1
	dot := strings.LastIndex(fileName, ".")
	if dot < 0 {
		name = fileName
	} else {
		name = fileName[:dot]
	}
	uniqueName = fileName
	for nameExists(entries, uniqueName) && i <= duplicateNamesCount {
		if dot < 0 {
			uniqueName = fmt.Sprintf("%s (%d)", name, i)
		} else {
			uniqueName = fmt.Sprintf("%s (%d).%s", name, i, fileName[dot+1:])
		}
		i++
	}

	if i <= duplicateNamesCount {
		return uniqueName
	}

	return ""
}

// nameExists reports whether fileName occurs among entries.
func nameExists(entries []*fsmgr.SeafDirent, fileName string) bool {
	for _, entry := range entries {
		if entry.Name == fileName {
			return true
		}
	}

	return false
}

// shouldIgnore rejects paths containing a ".." component.
func shouldIgnore(fileName string) bool {
	parts := strings.Split(fileName, "/")
	for _, name := range parts {
		if name == ".." {
			return true
		}
	}
	return false
}

// shouldIgnoreFile rejects names with "..", invalid UTF-8, length >= 256
// bytes, or embedded slashes.
func shouldIgnoreFile(fileName string) bool {
	if shouldIgnore(fileName) {
		return true
	}

	if !utf8.ValidString(fileName) {
		log.Warnf("file name %s contains non-UTF8 characters, skip", fileName)
		return true
	}

	if len(fileName) >= 256 {
		return true
	}

	if strings.Contains(fileName, "/") {
		return true
	}

	return false
}

// indexBlocks submits an index-file request to the worker pool and blocks
// until the resulting file ID and size are available.
func indexBlocks(ctx context.Context, repoID string, version int, filePath string, handler *multipart.FileHeader, cryptKey *seafileCrypt) (string, int64, error) {
	req := &indexFileRequest{
		ctx:      ctx,
		repoID:   repoID,
		version:  version,
		filePath: filePath,
		handler:  handler,
		cryptKey: cryptKey,
	}

	recvChan := make(chan *indexFileResult)

	indexFilePool.AddTask(recvChan, req)
	result := <-recvChan
	return result.fileID, result.size, result.err
}

type indexFileRequest struct {
	ctx      context.Context
	repoID   string
	version  int
	filePath string
	handler  *multipart.FileHeader
	cryptKey *seafileCrypt
}

type indexFileResult struct {
	fileID string
	size   int64
	err    error
}

// indexFileWorker chunks one file (from a multipart header or an on-disk
// path) into fixed-size blocks via a chunking pool, writes the seafile
// object, and sends the result on args[0]. Always returns nil; errors are
// delivered through the result channel.
func indexFileWorker(args ...any) error {
	resChan := args[0].(chan *indexFileResult)
	req := args[1].(*indexFileRequest)

	ctx := req.ctx
	repoID := req.repoID
	version := req.version
	filePath := req.filePath
	handler := req.handler
	cryptKey := req.cryptKey

	var size int64
	if handler != nil {
		size = handler.Size
	} else {
		f, err := os.Open(filePath)
		if err != nil {
			err := fmt.Errorf("failed to open file: %s: %v", filePath, err)
			resChan <- &indexFileResult{err: err}
			return nil
		}
		defer f.Close()
		fileInfo, err := f.Stat()
		if err != nil {
			err := fmt.Errorf("failed to stat file %s: %v", filePath, err)
			resChan <- &indexFileResult{err: err}
			return nil
		}
		size = fileInfo.Size()
	}

	if size == 0 {
		resChan <- &indexFileResult{fileID: fsmgr.EmptySha1, size: 0}
		return nil
	}

	chunkJobs := make(chan chunkingData, 10)
	results := make(chan chunkingResult, 10)
	go createChunkPool(ctx, int(option.MaxIndexingThreads), chunkJobs, results)

	var blkSize int64
	var offset int64

	jobNum := (uint64(size) + option.FixedBlockSize - 1) / option.FixedBlockSize
	blkIDs := make([]string, jobNum)

	left := size
	for {
		if uint64(left) >= option.FixedBlockSize {
			blkSize = int64(option.FixedBlockSize)
		} else {
			blkSize = left
		}
		if left > 0 {
			job := chunkingData{repoID, filePath, handler, offset, cryptKey}
			select {
			case chunkJobs <- job:
				left -= blkSize
				offset += blkSize
			case result := <-results:
				if result.err != nil {
					close(chunkJobs)

					// Drain remaining results so the chunking workers can
					// exit instead of blocking on a full channel.
					go RecoverWrapper(func() {
						for result := range results {
							_ = result
						}
					})
					resChan <- &indexFileResult{err: result.err}
					return nil
				}
				blkIDs[result.idx] = result.blkID
			}
		} else {
			close(chunkJobs)
			for result := range results {
				if result.err != nil {
					go RecoverWrapper(func() {
						for result := range results {
							_ = result
						}
					})
					resChan <- &indexFileResult{err: result.err}
					return nil
				}
				blkIDs[result.idx] = result.blkID
			}
			break
		}
	}

	fileID, err := writeSeafile(repoID, version, size, blkIDs)
	if err != nil {
		err := fmt.Errorf("failed to write seafile: %v", err)
		resChan <- &indexFileResult{err: err}
		return nil
	}

	resChan <- &indexFileResult{fileID: fileID, size: size}
	return nil
}

// writeSeafile creates and persists the seafile (file metadata) object for
// the given block list, returning its file ID.
func writeSeafile(repoID string, version int, fileSize int64, blkIDs []string) (string, error) {
	seafile, err := fsmgr.NewSeafile(version, fileSize, blkIDs)
	if err != nil {
		err := fmt.Errorf("failed to new seafile: %v", err)
		return "", err
	}

	err = fsmgr.SaveSeafile(repoID, seafile)
	if err != nil {
		err := fmt.Errorf("failed to save seafile %s/%s", repoID, seafile.FileID)
		return "", err
	}

	return seafile.FileID, nil
}

type chunkingData struct {
	repoID   string
	filePath string
	handler  *multipart.FileHeader
	offset   int64
	cryptKey *seafileCrypt
}

type chunkingResult struct {
	idx   int64
	blkID string
	err   error
}

// createChunkPool starts n chunking workers, waits for them to finish, and
// closes the result channel.
func createChunkPool(ctx context.Context, n int, chunkJobs chan chunkingData, res chan chunkingResult) {
	defer func() {
		if err := recover(); err != nil {
			log.Errorf("panic: %v\n%s", err, debug.Stack())
		}
	}()
	var wg sync.WaitGroup
	for i := 0; i < n; i++ {
		wg.Add(1)
		go chunkingWorker(ctx, &wg, chunkJobs, res)
	}
	wg.Wait()
	close(res)
}

// chunkingWorker consumes chunking jobs until the channel closes or ctx is
// cancelled, emitting one chunkingResult per job (idx -1 on cancellation).
func chunkingWorker(ctx context.Context, wg *sync.WaitGroup, chunkJobs chan chunkingData, res chan chunkingResult) {
	defer func() {
		if err := recover(); err != nil {
			log.Errorf("panic: %v\n%s", err, debug.Stack())
		}
	}()
	for job := range chunkJobs {
		select {
		case <-ctx.Done():
			err := context.Canceled
			result := chunkingResult{-1, "", err}
			res <- result
			wg.Done()
			return
		default:
		}

		job := job
		blkID, err := chunkFile(job)
		idx := job.offset / int64(option.FixedBlockSize)
		result := chunkingResult{idx, blkID, err}
		res <- result
	}
	wg.Done()
}

// chunkFile reads one fixed-size block at job.offset (from the multipart
// file or the on-disk path) and writes it to block storage, returning the
// block ID.
func chunkFile(job chunkingData) (string, error) {
	repoID := job.repoID
	offset := job.offset
	filePath := job.filePath
	handler := job.handler
	blkSize := option.FixedBlockSize
	cryptKey := job.cryptKey
	var file multipart.File
	if handler != nil {
		f, err := handler.Open()
		if err != nil {
			err := fmt.Errorf("failed to open file for read: %v", err)
			return "", err
		}
		defer f.Close()
		file = f
	} else {
		f, err := os.Open(filePath)
		if err != nil {
			err := fmt.Errorf("failed to open file for read: %v", err)
			return "", err
		}
		defer f.Close()
		file = f
	}
	_, err := file.Seek(offset, io.SeekStart)
	if err != nil {
		err := fmt.Errorf("failed to seek file: %v", err)
		return "", err
	}
	buf := make([]byte, blkSize)
	n, err := file.Read(buf)
	if err != nil {
		err := fmt.Errorf("failed to read file: %v", err)
		return "", err
	}
	buf = buf[:n]

	blkID, err := writeChunk(repoID, buf, int64(n), cryptKey)
	if err != nil {
		err := fmt.Errorf("failed to write chunk: %v", err)
		return "", err
	}

	return blkID, nil
}

// writeChunk stores one block (encrypted when cryptKey is set), addressed by
// the SHA-1 of its stored bytes; existing blocks are not rewritten.
func writeChunk(repoID string, input []byte, blkSize int64, cryptKey *seafileCrypt) (string, error) {
	var blkID string
	if cryptKey != nil && blkSize > 0 {
		encoded, err := cryptKey.encrypt(input)
		if err != nil {
			err := fmt.Errorf("failed to encrypt block: %v", err)
			return "", err
		}
		checkSum := sha1.Sum(encoded)
		blkID = hex.EncodeToString(checkSum[:])
		if blockmgr.Exists(repoID, blkID) {
			return blkID, nil
		}
		reader := bytes.NewReader(encoded)
		err = blockmgr.Write(repoID, blkID, reader)
		if err != nil {
			err := fmt.Errorf("failed to write block: %v", err)
			return "", err
		}
	} else {
		checkSum := sha1.Sum(input)
		blkID = hex.EncodeToString(checkSum[:])
		if blockmgr.Exists(repoID, blkID) {
			return blkID, nil
		}
		reader := bytes.NewReader(input)
		err := blockmgr.Write(repoID, blkID, reader)
		if err != nil {
			err := fmt.Errorf("failed to write block: %v", err)
			return "", err
		}
	}

	return blkID, nil
}

// checkTmpFileList verifies the total upload size (temp files for ranged
// uploads, multipart headers otherwise) against MaxUploadSize.
func checkTmpFileList(fsm *recvData) *appError {
	var totalSize int64
	if fsm.rstart >= 0 {
		for _, tmpFile := range fsm.files {
			fileInfo, err := os.Stat(tmpFile)
			if err != nil {
				msg := "Internal error.\n"
				err := fmt.Errorf("[upload] Failed to stat temp file %s", tmpFile)
				return &appError{err, msg, http.StatusInternalServerError}
			}
			totalSize += fileInfo.Size()
		}
	} else {
		for _, handler := range fsm.fileHeaders {
			totalSize += handler.Size
		}
	}

	if option.MaxUploadSize > 0 && uint64(totalSize) > option.MaxUploadSize {
		msg := "File size is too large.\n"
		return &appError{nil, msg, seafHTTPResTooLarge}
	}

	return nil
}

func checkParentDir(repoID string, parentDir string) *appError {
	repo := repomgr.Get(repoID)
	if repo == nil {
		msg := "Failed to get 
repo.\\n\"\n\t\terr := fmt.Errorf(\"Failed to get repo %s\", repoID)\n\t\treturn &appError{err, msg, http.StatusInternalServerError}\n\t}\n\n\tcommit, err := commitmgr.Load(repoID, repo.HeadCommitID)\n\tif err != nil {\n\t\tmsg := \"Failed to get head commit.\\n\"\n\t\terr := fmt.Errorf(\"Failed to get head commit for repo %s\", repoID)\n\t\treturn &appError{err, msg, http.StatusInternalServerError}\n\t}\n\n\tcanonPath := getCanonPath(parentDir)\n\n\t_, err = fsmgr.GetSeafdirByPath(repo.StoreID, commit.RootID, canonPath)\n\tif err != nil {\n\t\tmsg := \"Parent dir doesn't exist.\\n\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\n\treturn nil\n}\n\nfunc isParentMatched(uploadDir, parentDir string) bool {\n\tuploadCanon := filepath.Join(\"/\", uploadDir)\n\tparentCanon := filepath.Join(\"/\", parentDir)\n\treturn uploadCanon == parentCanon\n}\n\nfunc parseContentRange(ranges string, fsm *recvData) bool {\n\tstart := strings.Index(ranges, \"bytes\")\n\tend := strings.Index(ranges, \"-\")\n\tslash := strings.Index(ranges, \"/\")\n\n\tif start < 0 || end < 0 || slash < 0 {\n\t\treturn false\n\t}\n\n\tstartStr := strings.TrimLeft(ranges[start+len(\"bytes\"):end], \" \")\n\tfirstByte, err := strconv.ParseInt(startStr, 10, 64)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tlastByte, err := strconv.ParseInt(ranges[end+1:slash], 10, 64)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tfileSize, err := strconv.ParseInt(ranges[slash+1:], 10, 64)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tif firstByte > lastByte || lastByte >= fileSize {\n\t\treturn false\n\t}\n\n\tfsm.rstart = firstByte\n\tfsm.rend = lastByte\n\tfsm.fsize = fileSize\n\n\treturn true\n}\n\ntype webaccessInfo struct {\n\trepoID string\n\tobjID  string\n\top     string\n\tuser   string\n}\n\nfunc parseWebaccessInfo(token string) (*webaccessInfo, *appError) {\n\twebaccess, err := rpcclient.Call(\"seafile_web_query_access_token\", token)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to get 
web access token: %v\", err)\n\t\treturn nil, &appError{err, \"\", http.StatusInternalServerError}\n\t}\n\tif webaccess == nil {\n\t\tmsg := \"Access token not found\"\n\t\treturn nil, &appError{err, msg, http.StatusForbidden}\n\t}\n\n\twebaccessMap, ok := webaccess.(map[string]interface{})\n\tif !ok {\n\t\treturn nil, &appError{nil, \"\", http.StatusInternalServerError}\n\t}\n\n\taccessInfo := new(webaccessInfo)\n\trepoID, ok := webaccessMap[\"repo-id\"].(string)\n\tif !ok {\n\t\treturn nil, &appError{nil, \"\", http.StatusInternalServerError}\n\t}\n\taccessInfo.repoID = repoID\n\n\tid, ok := webaccessMap[\"obj-id\"].(string)\n\tif !ok {\n\t\treturn nil, &appError{nil, \"\", http.StatusInternalServerError}\n\t}\n\taccessInfo.objID = id\n\n\top, ok := webaccessMap[\"op\"].(string)\n\tif !ok {\n\t\treturn nil, &appError{nil, \"\", http.StatusInternalServerError}\n\t}\n\taccessInfo.op = op\n\n\tuser, ok := webaccessMap[\"username\"].(string)\n\tif !ok {\n\t\treturn nil, &appError{nil, \"\", http.StatusInternalServerError}\n\t}\n\taccessInfo.user = user\n\n\treturn accessInfo, nil\n}\n\nfunc updateDir(repoID, dirPath, newDirID, user, headID string) (string, error) {\n\trepo := repomgr.Get(repoID)\n\tif repo == nil {\n\t\terr := fmt.Errorf(\"failed to get repo %.10s\", repoID)\n\t\treturn \"\", err\n\t}\n\n\tvar base string\n\tif headID == \"\" {\n\t\tbase = repo.HeadCommitID\n\t} else {\n\t\tbase = headID\n\t}\n\n\theadCommit, err := commitmgr.Load(repo.ID, base)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to get head commit for repo %s\", repo.ID)\n\t\treturn \"\", err\n\t}\n\n\tif dirPath == \"/\" {\n\t\tcommitDesc := genCommitDesc(repo, newDirID, headCommit.RootID)\n\t\tif commitDesc == \"\" {\n\t\t\tcommitDesc = \"Auto merge by system\"\n\t\t}\n\t\tnewCommitID, err := genNewCommit(repo, headCommit, newDirID, user, commitDesc, true, \"\", false)\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"failed to generate new commit: %v\", err)\n\t\t\treturn \"\", 
err\n\t\t}\n\t\treturn newCommitID, nil\n\t}\n\n\tparent := filepath.Dir(dirPath)\n\tcanonPath := getCanonPath(parent)\n\tdirName := filepath.Base(dirPath)\n\n\tdir, err := fsmgr.GetSeafdirByPath(repo.StoreID, headCommit.RootID, canonPath)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"dir %s doesn't exist in repo %s\", canonPath, repo.StoreID)\n\t\treturn \"\", err\n\t}\n\tvar exists bool\n\tfor _, de := range dir.Entries {\n\t\tif de.Name == dirName {\n\t\t\texists = true\n\t\t}\n\t}\n\tif !exists {\n\t\terr := fmt.Errorf(\"directory %s doesn't exist in repo %s\", dirName, repo.StoreID)\n\t\treturn \"\", err\n\t}\n\n\tnewDent := fsmgr.NewDirent(newDirID, dirName, (syscall.S_IFDIR | 0644), time.Now().Unix(), \"\", 0)\n\n\trootID, err := doPutFile(repo, headCommit.RootID, canonPath, newDent)\n\tif err != nil || rootID == \"\" {\n\t\terr := fmt.Errorf(\"failed to put file\")\n\t\treturn \"\", err\n\t}\n\n\tcommitDesc := genCommitDesc(repo, rootID, headCommit.RootID)\n\tif commitDesc == \"\" {\n\t\tcommitDesc = \"Auto merge by system\"\n\t}\n\n\tnewCommitID, err := genNewCommit(repo, headCommit, rootID, user, commitDesc, true, \"\", false)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to generate new commit: %v\", err)\n\t\treturn \"\", err\n\t}\n\n\tgo updateSizePool.AddTask(repoID)\n\n\treturn newCommitID, nil\n}\n\nfunc genCommitDesc(repo *repomgr.Repo, root, parentRoot string) string {\n\tvar results []*diff.DiffEntry\n\terr := diff.DiffCommitRoots(repo.StoreID, parentRoot, root, &results, true)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\n\tdesc := diff.DiffResultsToDesc(results)\n\n\treturn desc\n}\n\nfunc doPutFile(repo *repomgr.Repo, rootID, parentDir string, dent *fsmgr.SeafDirent) (string, error) {\n\tif strings.Index(parentDir, \"/\") == 0 {\n\t\tparentDir = parentDir[1:]\n\t}\n\n\treturn putFileRecursive(repo, rootID, parentDir, dent)\n}\n\nfunc putFileRecursive(repo *repomgr.Repo, dirID, toPath string, newDent *fsmgr.SeafDirent) (string, error) 
{\n\tolddir, err := fsmgr.GetSeafdir(repo.StoreID, dirID)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to get dir\")\n\t\treturn \"\", err\n\t}\n\tentries := olddir.Entries\n\n\tvar ret string\n\n\tif toPath == \"\" {\n\t\tvar newEntries []*fsmgr.SeafDirent\n\t\tfor _, dent := range entries {\n\t\t\tif dent.Name == newDent.Name {\n\t\t\t\tnewEntries = append(newEntries, newDent)\n\t\t\t} else {\n\t\t\t\tnewEntries = append(newEntries, dent)\n\t\t\t}\n\t\t}\n\n\t\tnewdir, err := fsmgr.NewSeafdir(1, newEntries)\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"failed to new seafdir: %v\", err)\n\t\t\treturn \"\", err\n\t\t}\n\t\terr = fsmgr.SaveSeafdir(repo.StoreID, newdir)\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"failed to save seafdir %s/%s\", repo.ID, newdir.DirID)\n\t\t\treturn \"\", err\n\t\t}\n\n\t\treturn newdir.DirID, nil\n\t}\n\n\tvar remain string\n\tfirstName := toPath\n\tif slash := strings.Index(toPath, \"/\"); slash >= 0 {\n\t\tremain = toPath[slash+1:]\n\t\tfirstName = toPath[:slash]\n\t}\n\n\tfor _, dent := range entries {\n\t\tif dent.Name != firstName {\n\t\t\tcontinue\n\t\t}\n\t\tid, err := putFileRecursive(repo, dent.ID, remain, newDent)\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"failed to put dirent %s: %v\", dent.Name, err)\n\t\t\treturn \"\", err\n\t\t}\n\t\tif id != \"\" {\n\t\t\tdent.ID = id\n\t\t\tdent.Mtime = time.Now().Unix()\n\t\t}\n\t\tret = id\n\t\tbreak\n\t}\n\n\tif ret != \"\" {\n\t\tnewdir, err := fsmgr.NewSeafdir(1, entries)\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"failed to new seafdir: %v\", err)\n\t\t\treturn \"\", err\n\t\t}\n\t\terr = fsmgr.SaveSeafdir(repo.StoreID, newdir)\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"failed to save seafdir %s/%s\", repo.ID, newdir.DirID)\n\t\t\treturn \"\", err\n\t\t}\n\t\tret = newdir.DirID\n\t} else {\n\t\terr := fmt.Errorf(\"failed to find parent dir for %s\", toPath)\n\t\treturn \"\", err\n\t}\n\n\treturn ret, nil\n}\n\nfunc updateAPICB(rsp http.ResponseWriter, r 
*http.Request) *appError {\n\tif r.Method == \"OPTIONS\" {\n\t\tsetAccessControl(rsp)\n\t\trsp.WriteHeader(http.StatusOK)\n\t\treturn nil\n\t}\n\n\tfsm, err := parseUploadHeaders(r)\n\tif err != nil {\n\t\tformatJSONError(rsp, err)\n\t\treturn err\n\t}\n\n\tif err := doUpdate(rsp, r, fsm, false); err != nil {\n\t\tformatJSONError(rsp, err)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc updateAjaxCB(rsp http.ResponseWriter, r *http.Request) *appError {\n\tif r.Method == \"OPTIONS\" {\n\t\tsetAccessControl(rsp)\n\t\trsp.WriteHeader(http.StatusOK)\n\t\treturn nil\n\t}\n\n\tfsm, err := parseUploadHeaders(r)\n\tif err != nil {\n\t\tformatJSONError(rsp, err)\n\t\treturn err\n\t}\n\n\tif err := doUpdate(rsp, r, fsm, true); err != nil {\n\t\tformatJSONError(rsp, err)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc doUpdate(rsp http.ResponseWriter, r *http.Request, fsm *recvData, isAjax bool) *appError {\n\tsetAccessControl(rsp)\n\n\tif err := r.ParseMultipartForm(1 << 20); err != nil {\n\t\treturn &appError{nil, \"\", http.StatusBadRequest}\n\t}\n\tdefer r.MultipartForm.RemoveAll()\n\n\trepoID := fsm.repoID\n\tuser := fsm.user\n\n\ttargetFile := normalizeUTF8Path(r.FormValue(\"target_file\"))\n\tif targetFile == \"\" {\n\t\tmsg := \"No target_file given.\\n\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\n\tlastModifyStr := normalizeUTF8Path(r.FormValue(\"last_modify\"))\n\tvar lastModify int64\n\tif lastModifyStr != \"\" {\n\t\tt, err := time.Parse(time.RFC3339, lastModifyStr)\n\t\tif err == nil {\n\t\t\tlastModify = t.Unix()\n\t\t}\n\t}\n\n\tparentDir := filepath.Dir(targetFile)\n\tfileName := filepath.Base(targetFile)\n\n\tdefer clearTmpFile(fsm, parentDir)\n\n\tif fsm.rstart >= 0 {\n\t\tif parentDir[0] != '/' {\n\t\t\tmsg := \"Invalid parent dir\"\n\t\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t\t}\n\n\t\tformFiles := r.MultipartForm.File\n\t\tfiles, ok := formFiles[\"file\"]\n\t\tif !ok {\n\t\t\tmsg := \"No file in multipart 
form.\\n\"\n\t\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t\t}\n\n\t\tif len(files) > 1 {\n\t\t\tmsg := \"More files in one request\"\n\t\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t\t}\n\n\t\terr := writeBlockDataToTmpFile(r, fsm, formFiles, repoID, parentDir)\n\t\tif err != nil {\n\t\t\tmsg := \"Internal error.\\n\"\n\t\t\terr := fmt.Errorf(\"failed to write block data to tmp file: %v\", err)\n\t\t\treturn &appError{err, msg, http.StatusInternalServerError}\n\t\t}\n\n\t\tif fsm.rend != fsm.fsize-1 {\n\t\t\trsp.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\t\t\tsuccess := \"{\\\"success\\\": true}\"\n\t\t\trsp.Write([]byte(success))\n\n\t\t\treturn nil\n\t\t}\n\t} else {\n\t\tformFiles := r.MultipartForm.File\n\t\tfileHeaders, ok := formFiles[\"file\"]\n\t\tif !ok {\n\t\t\tmsg := \"No file in multipart form.\\n\"\n\t\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t\t}\n\t\tif len(fileHeaders) > 1 {\n\t\t\tmsg := \"More files in one request\"\n\t\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t\t}\n\t\tfor _, handler := range fileHeaders {\n\t\t\tfileName := filepath.Base(handler.Filename)\n\t\t\tfsm.fileNames = append(fsm.fileNames, fileName)\n\t\t\tfsm.fileHeaders = append(fsm.fileHeaders, handler)\n\t\t}\n\t}\n\n\tif fsm.fileNames == nil {\n\t\tmsg := \"No file.\\n\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\n\tif err := checkParentDir(repoID, parentDir); err != nil {\n\t\treturn err\n\t}\n\n\tif err := checkTmpFileList(fsm); err != nil {\n\t\treturn err\n\t}\n\n\tvar contentLen int64\n\tif fsm.fsize > 0 {\n\t\tcontentLen = fsm.fsize\n\t} else {\n\t\tlenstr := r.Header.Get(\"Content-Length\")\n\t\tif lenstr == \"\" {\n\t\t\tcontentLen = -1\n\t\t} else {\n\t\t\ttmpLen, err := strconv.ParseInt(lenstr, 10, 64)\n\t\t\tif err != nil {\n\t\t\t\tmsg := \"Internal error.\\n\"\n\t\t\t\terr := fmt.Errorf(\"failed to parse content len: %v\", err)\n\t\t\t\treturn &appError{err, msg, 
http.StatusInternalServerError}\n\t\t\t}\n\t\t\tcontentLen = tmpLen\n\t\t}\n\t}\n\n\tret, err := checkQuota(repoID, contentLen)\n\tif err != nil {\n\t\tmsg := \"Internal error.\\n\"\n\t\terr := fmt.Errorf(\"failed to check quota: %v\", err)\n\t\treturn &appError{err, msg, http.StatusInternalServerError}\n\t}\n\tif ret == 1 {\n\t\tmsg := \"Out of quota.\\n\"\n\t\treturn &appError{nil, msg, seafHTTPResNoQuota}\n\t}\n\n\theadIDs, ok := r.Form[\"head\"]\n\tvar headID string\n\tif ok {\n\t\theadID = headIDs[0]\n\t}\n\n\tif err := putFile(rsp, r, repoID, parentDir, user, fileName, fsm, headID, lastModify, isAjax); err != nil {\n\t\treturn err\n\t}\n\n\toper := \"web-file-upload\"\n\tsendStatisticMsg(repoID, user, oper, uint64(contentLen))\n\n\treturn nil\n}\n\nfunc putFile(rsp http.ResponseWriter, r *http.Request, repoID, parentDir, user, fileName string, fsm *recvData, headID string, lastModify int64, isAjax bool) *appError {\n\tfiles := fsm.files\n\trepo := repomgr.Get(repoID)\n\tif repo == nil {\n\t\tmsg := \"Failed to get repo.\\n\"\n\t\terr := fmt.Errorf(\"Failed to get repo %s\", repoID)\n\t\treturn &appError{err, msg, http.StatusInternalServerError}\n\t}\n\n\tvar base string\n\tif headID != \"\" {\n\t\tbase = headID\n\t} else {\n\t\tbase = repo.HeadCommitID\n\t}\n\n\theadCommit, err := commitmgr.Load(repo.ID, base)\n\tif err != nil {\n\t\tmsg := \"Failed to get head commit.\\n\"\n\t\terr := fmt.Errorf(\"failed to get head commit for repo %s\", repo.ID)\n\t\treturn &appError{err, msg, http.StatusInternalServerError}\n\t}\n\n\tcanonPath := getCanonPath(parentDir)\n\n\tif shouldIgnoreFile(fileName) {\n\t\tmsg := fmt.Sprintf(\"invalid fileName: %s.\\n\", fileName)\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\n\tif strings.Contains(parentDir, \"//\") {\n\t\tmsg := \"parent_dir contains // sequence.\\n\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\n\texist, _ := checkFileExists(repo.StoreID, headCommit.RootID, canonPath, 
fileName)\n\tif !exist {\n\t\tmsg := \"File does not exist.\\n\"\n\t\treturn &appError{nil, msg, seafHTTPResNotExists}\n\t}\n\n\tvar cryptKey *seafileCrypt\n\tif repo.IsEncrypted {\n\t\tkey, err := parseCryptKey(rsp, repoID, user, repo.EncVersion)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcryptKey = key\n\t}\n\n\tgcID, err := repomgr.GetCurrentGCID(repo.StoreID)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to get current gc id: %v\", err)\n\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t}\n\n\tvar fileID string\n\tvar size int64\n\tif fsm.rstart >= 0 {\n\t\tfilePath := files[0]\n\t\tid, fileSize, err := indexBlocks(r.Context(), repo.StoreID, repo.Version, filePath, nil, cryptKey)\n\t\tif err != nil {\n\t\t\tif !errors.Is(err, context.Canceled) {\n\t\t\t\terr := fmt.Errorf(\"failed to index blocks: %w\", err)\n\t\t\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t\t\t}\n\t\t\treturn &appError{nil, \"\", http.StatusInternalServerError}\n\t\t}\n\t\tfileID = id\n\t\tsize = fileSize\n\t} else {\n\t\thandler := fsm.fileHeaders[0]\n\t\tid, fileSize, err := indexBlocks(r.Context(), repo.StoreID, repo.Version, \"\", handler, cryptKey)\n\t\tif err != nil {\n\t\t\tif !errors.Is(err, context.Canceled) {\n\t\t\t\terr := fmt.Errorf(\"failed to index blocks: %w\", err)\n\t\t\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t\t\t}\n\t\t\treturn &appError{nil, \"\", http.StatusInternalServerError}\n\t\t}\n\t\tfileID = id\n\t\tsize = fileSize\n\t}\n\n\tfullPath := filepath.Join(parentDir, fileName)\n\toldFileID, _, _ := fsmgr.GetObjIDByPath(repo.StoreID, headCommit.RootID, fullPath)\n\tif fileID == oldFileID {\n\t\tif isAjax {\n\t\t\tretJSON, err := formatUpdateJSONRet(fileName, fileID, size)\n\t\t\tif err != nil {\n\t\t\t\terr := fmt.Errorf(\"failed to format json data\")\n\t\t\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t\t\t}\n\t\t\trsp.Write(retJSON)\n\t\t} else 
{\n\t\t\trsp.Write([]byte(fileID))\n\t\t}\n\t\treturn nil\n\t}\n\n\tmtime := time.Now().Unix()\n\tif lastModify > 0 {\n\t\tmtime = lastModify\n\t}\n\tmode := (syscall.S_IFREG | 0644)\n\tnewDent := fsmgr.NewDirent(fileID, fileName, uint32(mode), mtime, user, size)\n\n\tvar names []string\n\trootID, err := doPostMultiFiles(repo, headCommit.RootID, canonPath, []*fsmgr.SeafDirent{newDent}, user, true, &names)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to put file %s to %s in repo %s: %v\", fileName, canonPath, repo.ID, err)\n\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t}\n\n\tdesc := fmt.Sprintf(\"Modified \\\"%s\\\"\", fileName)\n\t_, err = genNewCommit(repo, headCommit, rootID, user, desc, true, gcID, true)\n\tif err != nil {\n\t\tif errors.Is(err, ErrGCConflict) {\n\t\t\treturn &appError{nil, \"GC Conflict.\\n\", http.StatusConflict}\n\t\t} else {\n\t\t\terr := fmt.Errorf(\"failed to generate new commit: %v\", err)\n\t\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t\t}\n\t}\n\n\tif isAjax {\n\t\tretJSON, err := formatUpdateJSONRet(fileName, fileID, size)\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"failed to format json data\")\n\t\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t\t}\n\t\trsp.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\t\trsp.Write(retJSON)\n\t} else {\n\t\trsp.Write([]byte(fileID))\n\t}\n\n\tgo mergeVirtualRepoPool.AddTask(repo.ID)\n\n\treturn nil\n}\n\nfunc formatUpdateJSONRet(fileName, fileID string, size int64) ([]byte, error) {\n\tvar array []map[string]interface{}\n\tobj := make(map[string]interface{})\n\tobj[\"name\"] = fileName\n\tobj[\"id\"] = fileID\n\tobj[\"size\"] = size\n\tarray = append(array, obj)\n\n\tjsonstr, err := json.Marshal(array)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to convert array to json\")\n\t\treturn nil, err\n\t}\n\n\treturn jsonstr, nil\n}\n\nfunc checkFileExists(storeID, rootID, parentDir, fileName string) 
(bool, error) {\n\tdir, err := fsmgr.GetSeafdirByPath(storeID, rootID, parentDir)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"parent_dir %s doesn't exist in repo %s: %v\", parentDir, storeID, err)\n\t\treturn false, err\n\t}\n\n\tvar ret bool\n\tentries := dir.Entries\n\tfor _, de := range entries {\n\t\tif de.Name == fileName {\n\t\t\tret = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn ret, nil\n}\n\nfunc uploadBlksAPICB(rsp http.ResponseWriter, r *http.Request) *appError {\n\tfsm, err := parseUploadHeaders(r)\n\tif err != nil {\n\t\tformatJSONError(rsp, err)\n\t\treturn err\n\t}\n\n\tif err := doUploadBlks(rsp, r, fsm); err != nil {\n\t\tformatJSONError(rsp, err)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc doUploadBlks(rsp http.ResponseWriter, r *http.Request, fsm *recvData) *appError {\n\tif err := r.ParseMultipartForm(1 << 20); err != nil {\n\t\treturn &appError{nil, \"\", http.StatusBadRequest}\n\t}\n\tdefer r.MultipartForm.RemoveAll()\n\n\trepoID := fsm.repoID\n\tuser := fsm.user\n\n\treplaceStr := r.FormValue(\"replace\")\n\tvar replaceExisted bool\n\tif replaceStr != \"\" {\n\t\treplace, err := strconv.ParseInt(replaceStr, 10, 64)\n\t\tif err != nil || (replace != 0 && replace != 1) {\n\t\t\tmsg := \"Invalid argument replace.\\n\"\n\t\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t\t}\n\t\tif replace == 1 {\n\t\t\treplaceExisted = true\n\t\t}\n\t}\n\n\tparentDir := normalizeUTF8Path(r.FormValue(\"parent_dir\"))\n\tif parentDir == \"\" {\n\t\tmsg := \"No parent_dir given.\\n\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\n\tlastModifyStr := normalizeUTF8Path(r.FormValue(\"last_modify\"))\n\tvar lastModify int64\n\tif lastModifyStr != \"\" {\n\t\tt, err := time.Parse(time.RFC3339, lastModifyStr)\n\t\tif err == nil {\n\t\t\tlastModify = t.Unix()\n\t\t}\n\t}\n\n\tfileName := normalizeUTF8Path(r.FormValue(\"file_name\"))\n\tif fileName == \"\" {\n\t\tmsg := \"No file_name given.\\n\"\n\t\treturn &appError{nil, msg, 
http.StatusBadRequest}\n\t}\n\n\tfileSizeStr := r.FormValue(\"file_size\")\n\tvar fileSize int64 = -1\n\tif fileSizeStr != \"\" {\n\t\tsize, err := strconv.ParseInt(fileSizeStr, 10, 64)\n\t\tif err != nil {\n\t\t\tmsg := \"Invalid argument file_size.\\n\"\n\t\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t\t}\n\t\tfileSize = size\n\t}\n\n\tif fileSize < 0 {\n\t\tmsg := \"Invalid file size.\\n\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\n\tcommitOnlyStr, ok := r.Form[\"commitonly\"]\n\tif !ok || len(commitOnlyStr) == 0 {\n\t\tmsg := \"Only commit supported.\\n\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\n\tif err := checkParentDir(repoID, parentDir); err != nil {\n\t\treturn err\n\t}\n\n\tblockIDsJSON := r.FormValue(\"blockids\")\n\tif blockIDsJSON == \"\" {\n\t\tmsg := \"No blockids given.\\n\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\n\tfileID, appErr := commitFileBlocks(repoID, parentDir, fileName, blockIDsJSON, user, fileSize, replaceExisted, lastModify)\n\tif appErr != nil {\n\t\treturn appErr\n\t}\n\t_, ok = r.Form[\"ret-json\"]\n\tif ok {\n\t\tobj := make(map[string]interface{})\n\t\tobj[\"id\"] = fileID\n\n\t\tjsonstr, err := json.Marshal(obj)\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"failed to convert array to json: %v\", err)\n\t\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t\t}\n\t\trsp.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\t\trsp.Write([]byte(jsonstr))\n\t} else {\n\t\trsp.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\t\trsp.Write([]byte(\"\\\"\"))\n\t\trsp.Write([]byte(fileID))\n\t\trsp.Write([]byte(\"\\\"\"))\n\t}\n\n\treturn nil\n}\n\nfunc commitFileBlocks(repoID, parentDir, fileName, blockIDsJSON, user string, fileSize int64, replace bool, lastModify int64) (string, *appError) {\n\trepo := repomgr.Get(repoID)\n\tif repo == nil {\n\t\tmsg := \"Failed to get repo.\\n\"\n\t\terr := fmt.Errorf(\"Failed to 
get repo %s\", repoID)\n\t\treturn \"\", &appError{err, msg, http.StatusInternalServerError}\n\t}\n\n\theadCommit, err := commitmgr.Load(repo.ID, repo.HeadCommitID)\n\tif err != nil {\n\t\tmsg := \"Failed to get head commit.\\n\"\n\t\terr := fmt.Errorf(\"failed to get head commit for repo %s\", repo.ID)\n\t\treturn \"\", &appError{err, msg, http.StatusInternalServerError}\n\t}\n\n\tcanonPath := getCanonPath(parentDir)\n\n\tif shouldIgnoreFile(fileName) {\n\t\tmsg := fmt.Sprintf(\"invalid fileName: %s.\\n\", fileName)\n\t\treturn \"\", &appError{nil, msg, http.StatusBadRequest}\n\t}\n\n\tif strings.Contains(parentDir, \"//\") {\n\t\tmsg := \"parent_dir contains // sequence.\\n\"\n\t\treturn \"\", &appError{nil, msg, http.StatusBadRequest}\n\t}\n\n\tvar blkIDs []string\n\terr = json.Unmarshal([]byte(blockIDsJSON), &blkIDs)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to decode data to json: %v\", err)\n\t\treturn \"\", &appError{err, \"\", http.StatusInternalServerError}\n\t}\n\n\tappErr := checkQuotaBeforeCommitBlocks(repo.StoreID, blkIDs)\n\tif appErr != nil {\n\t\treturn \"\", appErr\n\t}\n\n\tgcID, err := repomgr.GetCurrentGCID(repo.StoreID)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to get current gc id: %v\", err)\n\t\treturn \"\", &appError{err, \"\", http.StatusInternalServerError}\n\t}\n\n\tfileID, appErr := indexExistedFileBlocks(repoID, repo.Version, blkIDs, fileSize)\n\tif appErr != nil {\n\t\treturn \"\", appErr\n\t}\n\n\tmtime := time.Now().Unix()\n\tif lastModify > 0 {\n\t\tmtime = lastModify\n\t}\n\tmode := (syscall.S_IFREG | 0644)\n\tnewDent := fsmgr.NewDirent(fileID, fileName, uint32(mode), mtime, user, fileSize)\n\tvar names []string\n\trootID, err := doPostMultiFiles(repo, headCommit.RootID, canonPath, []*fsmgr.SeafDirent{newDent}, user, replace, &names)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to post file %s to %s in repo %s: %v\", fileName, canonPath, repo.ID, err)\n\t\treturn \"\", &appError{err, \"\", 
http.StatusInternalServerError}\n\t}\n\n\tdesc := fmt.Sprintf(\"Added \\\"%s\\\"\", fileName)\n\t_, err = genNewCommit(repo, headCommit, rootID, user, desc, true, gcID, true)\n\tif err != nil {\n\t\tif errors.Is(err, ErrGCConflict) {\n\t\t\treturn \"\", &appError{nil, \"GC Conflict.\\n\", http.StatusConflict}\n\t\t} else {\n\t\t\terr := fmt.Errorf(\"failed to generate new commit: %v\", err)\n\t\t\treturn \"\", &appError{err, \"\", http.StatusInternalServerError}\n\t\t}\n\t}\n\n\treturn fileID, nil\n}\n\nfunc checkQuotaBeforeCommitBlocks(storeID string, blockIDs []string) *appError {\n\tvar totalSize int64\n\tfor _, blkID := range blockIDs {\n\t\tsize, err := blockmgr.Stat(storeID, blkID)\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"failed to stat block %s in store %s: %v\", blkID, storeID, err)\n\t\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t\t}\n\t\ttotalSize += size\n\t}\n\tret, err := checkQuota(storeID, totalSize)\n\tif err != nil {\n\t\tmsg := \"Internal error.\\n\"\n\t\terr := fmt.Errorf(\"failed to check quota: %v\", err)\n\t\treturn &appError{err, msg, http.StatusInternalServerError}\n\t}\n\tif ret == 1 {\n\t\tmsg := \"Out of quota.\\n\"\n\t\treturn &appError{nil, msg, seafHTTPResNoQuota}\n\t}\n\n\treturn nil\n}\n\nfunc indexExistedFileBlocks(repoID string, version int, blkIDs []string, fileSize int64) (string, *appError) {\n\tif len(blkIDs) == 0 {\n\t\treturn fsmgr.EmptySha1, nil\n\t}\n\n\tfor _, blkID := range blkIDs {\n\t\tif !blockmgr.Exists(repoID, blkID) {\n\t\t\terr := fmt.Errorf(\"failed to check block: %s\", blkID)\n\t\t\treturn \"\", &appError{err, \"\", seafHTTPResBlockMissing}\n\t\t}\n\t}\n\n\tfileID, err := writeSeafile(repoID, version, fileSize, blkIDs)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to write seafile: %v\", err)\n\t\treturn \"\", &appError{err, \"\", http.StatusInternalServerError}\n\t}\n\n\treturn fileID, nil\n}\n\nfunc uploadRawBlksAPICB(rsp http.ResponseWriter, r *http.Request) *appError 
{\n\tfsm, err := parseUploadHeaders(r)\n\tif err != nil {\n\t\tformatJSONError(rsp, err)\n\t\treturn err\n\t}\n\n\tif err := doUploadRawBlks(rsp, r, fsm); err != nil {\n\t\tformatJSONError(rsp, err)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc doUploadRawBlks(rsp http.ResponseWriter, r *http.Request, fsm *recvData) *appError {\n\tif err := r.ParseMultipartForm(1 << 20); err != nil {\n\t\treturn &appError{nil, \"\", http.StatusBadRequest}\n\t}\n\tdefer r.MultipartForm.RemoveAll()\n\n\trepoID := fsm.repoID\n\tuser := fsm.user\n\n\tformFiles := r.MultipartForm.File\n\tfileHeaders, ok := formFiles[\"file\"]\n\tif !ok {\n\t\tmsg := \"No file in multipart form.\\n\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\tfor _, handler := range fileHeaders {\n\t\tfileName := filepath.Base(handler.Filename)\n\t\tfsm.fileNames = append(fsm.fileNames, fileName)\n\t\tfsm.fileHeaders = append(fsm.fileHeaders, handler)\n\t}\n\n\tif fsm.fileNames == nil {\n\t\tmsg := \"No file.\\n\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\n\tif err := checkTmpFileList(fsm); err != nil {\n\t\treturn err\n\t}\n\n\tif err := postBlocks(repoID, user, fsm); err != nil {\n\t\treturn err\n\t}\n\n\tvar contentLen int64\n\tlenstr := r.Header.Get(\"Content-Length\")\n\tif lenstr != \"\" {\n\t\tconLen, err := strconv.ParseInt(lenstr, 10, 64)\n\t\tif err != nil {\n\t\t\tmsg := \"Internal error.\\n\"\n\t\t\terr := fmt.Errorf(\"failed to parse content len: %v\", err)\n\t\t\treturn &appError{err, msg, http.StatusInternalServerError}\n\t\t}\n\t\tcontentLen = conLen\n\t}\n\n\toper := \"web-file-upload\"\n\tsendStatisticMsg(repoID, user, oper, uint64(contentLen))\n\n\trsp.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\trsp.Write([]byte(\"\\\"OK\\\"\"))\n\n\treturn nil\n}\n\nfunc postBlocks(repoID, user string, fsm *recvData) *appError {\n\tblockIDs := fsm.fileNames\n\tfileHeaders := fsm.fileHeaders\n\trepo := repomgr.Get(repoID)\n\tif repo == nil {\n\t\tmsg 
:= \"Failed to get repo.\\n\"\n\t\terr := fmt.Errorf(\"Failed to get repo %s\", repoID)\n\t\treturn &appError{err, msg, http.StatusInternalServerError}\n\t}\n\n\tif err := indexRawBlocks(repo.StoreID, blockIDs, fileHeaders); err != nil {\n\t\terr := fmt.Errorf(\"failed to index file blocks\")\n\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t}\n\n\tgo updateSizePool.AddTask(repo.ID)\n\n\treturn nil\n}\n\nfunc indexRawBlocks(repoID string, blockIDs []string, fileHeaders []*multipart.FileHeader) error {\n\tfor i, handler := range fileHeaders {\n\t\tvar buf bytes.Buffer\n\t\tf, err := handler.Open()\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"failed to open file for read: %v\", err)\n\t\t\treturn err\n\t\t}\n\t\t_, err = buf.ReadFrom(f)\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"failed to read block: %v\", err)\n\t\t\treturn err\n\t\t}\n\t\tcheckSum := sha1.Sum(buf.Bytes())\n\t\tblkID := hex.EncodeToString(checkSum[:])\n\t\tif blkID != blockIDs[i] {\n\t\t\terr := fmt.Errorf(\"block id %s:%s doesn't match content\", blkID, blockIDs[i])\n\t\t\treturn err\n\t\t}\n\n\t\terr = blockmgr.Write(repoID, blkID, &buf)\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"failed to write block: %s/%s: %v\", repoID, blkID, err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n/*\nfunc uploadLinkCB(rsp http.ResponseWriter, r *http.Request) *appError {\n\tif seahubPK == \"\" {\n\t\terr := fmt.Errorf(\"no seahub private key is configured\")\n\t\treturn &appError{err, \"\", http.StatusNotFound}\n\t}\n\tif r.Method == \"OPTIONS\" {\n\t\tsetAccessControl(rsp)\n\t\trsp.WriteHeader(http.StatusOK)\n\t\treturn nil\n\t}\n\n\tfsm, err := parseUploadLinkHeaders(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := doUpload(rsp, r, fsm, false); err != nil {\n\t\tformatJSONError(rsp, err)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc parseUploadLinkHeaders(r *http.Request) (*recvData, *appError) {\n\ttokenLen := 36\n\tparts := strings.Split(r.URL.Path[1:], 
\"/\")\n\tif len(parts) < 2 {\n\t\tmsg := \"Invalid URL\"\n\t\treturn nil, &appError{nil, msg, http.StatusBadRequest}\n\t}\n\tif len(parts[1]) < tokenLen {\n\t\tmsg := \"Invalid URL\"\n\t\treturn nil, &appError{nil, msg, http.StatusBadRequest}\n\t}\n\ttoken := parts[1][:tokenLen]\n\n\tinfo, appErr := queryShareLinkInfo(token, \"upload\")\n\tif appErr != nil {\n\t\treturn nil, appErr\n\t}\n\n\trepoID := info.RepoID\n\tparentDir := normalizeUTF8Path(info.ParentDir)\n\n\tstatus, err := repomgr.GetRepoStatus(repoID)\n\tif err != nil {\n\t\treturn nil, &appError{err, \"\", http.StatusInternalServerError}\n\t}\n\tif status != repomgr.RepoStatusNormal && status != -1 {\n\t\tmsg := \"Repo status not writable.\"\n\t\treturn nil, &appError{nil, msg, http.StatusBadRequest}\n\t}\n\n\tuser, _ := repomgr.GetRepoOwner(repoID)\n\n\tfsm := new(recvData)\n\n\tfsm.parentDir = parentDir\n\tfsm.tokenType = \"upload-link\"\n\tfsm.repoID = repoID\n\tfsm.user = user\n\tfsm.rstart = -1\n\tfsm.rend = -1\n\tfsm.fsize = -1\n\n\tranges := r.Header.Get(\"Content-Range\")\n\tif ranges != \"\" {\n\t\tparseContentRange(ranges, fsm)\n\t}\n\n\treturn fsm, nil\n}\n*/\n\ntype ShareLinkInfo struct {\n\tRepoID    string `json:\"repo_id\"`\n\tFilePath  string `json:\"file_path\"`\n\tParentDir string `json:\"parent_dir\"`\n\tShareType string `json:\"share_type\"`\n}\n\nfunc queryShareLinkInfo(token, cookie, opType, ipAddr, userAgent string) (*ShareLinkInfo, *appError) {\n\ttokenString, err := utils.GenSeahubJWTToken()\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to sign jwt token: %v\", err)\n\t\treturn nil, &appError{err, \"\", http.StatusInternalServerError}\n\t}\n\turl := fmt.Sprintf(\"%s?type=%s\", option.SeahubURL+\"/check-share-link-access/\", opType)\n\theader := map[string][]string{\n\t\t\"Authorization\": {\"Token \" + tokenString},\n\t}\n\tif cookie != \"\" {\n\t\theader[\"Cookie\"] = []string{cookie}\n\t}\n\treq := make(map[string]string)\n\treq[\"token\"] = token\n\tif ipAddr != \"\" 
{\n\t\treq[\"ip_addr\"] = ipAddr\n\t}\n\tif userAgent != \"\" {\n\t\treq[\"user_agent\"] = userAgent\n\t}\n\tmsg, err := json.Marshal(req)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to encode access token: %v\", err)\n\t\treturn nil, &appError{err, \"\", http.StatusInternalServerError}\n\t}\n\tstatus, body, err := utils.HttpCommon(\"POST\", url, header, bytes.NewReader(msg))\n\tif err != nil {\n\t\tif status != http.StatusInternalServerError {\n\t\t\treturn nil, &appError{nil, string(body), status}\n\t\t} else {\n\t\t\terr := fmt.Errorf(\"failed to get share link info: %v\", err)\n\t\t\treturn nil, &appError{err, \"\", http.StatusInternalServerError}\n\t\t}\n\t}\n\n\tinfo := new(ShareLinkInfo)\n\terr = json.Unmarshal(body, &info)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to decode share link info: %v\", err)\n\t\treturn nil, &appError{err, \"\", http.StatusInternalServerError}\n\t}\n\n\treturn info, nil\n}\n\nfunc accessLinkCB(rsp http.ResponseWriter, r *http.Request) *appError {\n\tif option.JWTPrivateKey == \"\" {\n\t\terr := fmt.Errorf(\"no seahub private key is configured\")\n\t\treturn &appError{err, \"\", http.StatusNotFound}\n\t}\n\n\tparts := strings.Split(r.URL.Path[1:], \"/\")\n\tif len(parts) < 2 {\n\t\tmsg := \"Invalid URL\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\ttoken := parts[1]\n\tcookie := r.Header.Get(\"Cookie\")\n\tipAddr := getClientIPAddr(r)\n\tuserAgent := r.Header.Get(\"User-Agent\")\n\tinfo, appErr := queryShareLinkInfo(token, cookie, \"file\", ipAddr, userAgent)\n\tif appErr != nil {\n\t\treturn appErr\n\t}\n\n\tif info.FilePath == \"\" {\n\t\tmsg := \"Internal server error\\n\"\n\t\terr := fmt.Errorf(\"failed to get file_path by token %s\", token)\n\t\treturn &appError{err, msg, http.StatusInternalServerError}\n\t}\n\tif info.ShareType != \"f\" {\n\t\tmsg := \"Link type mismatch\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\n\trepoID := info.RepoID\n\tfilePath := 
normalizeUTF8Path(info.FilePath)\n\tfileName := filepath.Base(filePath)\n\n\top := r.URL.Query().Get(\"op\")\n\tif op != \"view\" {\n\t\top = \"download-link\"\n\t}\n\n\tranges := r.Header[\"Range\"]\n\tbyteRanges := strings.Join(ranges, \"\")\n\n\trepo := repomgr.Get(repoID)\n\tif repo == nil {\n\t\tmsg := \"Bad repo id\\n\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\n\tuser, _ := repomgr.GetRepoOwner(repoID)\n\n\tfileID, _, err := fsmgr.GetObjIDByPath(repo.StoreID, repo.RootID, filePath)\n\tif err != nil {\n\t\tmsg := \"Invalid file_path\\n\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\n\t// Check for file changes by comparing the ETag in the If-None-Match header with the file ID. Set no-cache to allow clients to validate file changes before using the cache.\n\tetag := r.Header.Get(\"If-None-Match\")\n\tif etag == fileID {\n\t\treturn &appError{nil, \"\", http.StatusNotModified}\n\t}\n\n\trsp.Header().Set(\"ETag\", fileID)\n\trsp.Header().Set(\"Cache-Control\", \"public, no-cache\")\n\n\tvar cryptKey *seafileCrypt\n\tif repo.IsEncrypted {\n\t\tkey, err := parseCryptKey(rsp, repoID, user, repo.EncVersion)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcryptKey = key\n\t}\n\n\texists, _ := fsmgr.Exists(repo.StoreID, fileID)\n\tif !exists {\n\t\tmsg := \"Invalid file id\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\n\tif !repo.IsEncrypted && len(byteRanges) != 0 {\n\t\tif err := doFileRange(rsp, r, repo, fileID, fileName, op, byteRanges, user); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else if err := doFile(rsp, r, repo, fileID, fileName, op, cryptKey, user); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n/*\nfunc accessDirLinkCB(rsp http.ResponseWriter, r *http.Request) *appError {\n\tif seahubPK == \"\" {\n\t\terr := fmt.Errorf(\"no seahub private key is configured\")\n\t\treturn &appError{err, \"\", http.StatusNotFound}\n\t}\n\n\tparts := strings.Split(r.URL.Path[1:], \"/\")\n\tif len(parts) < 2 
{\n\t\tmsg := \"Invalid URL\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\ttoken := parts[1]\n\tinfo, appErr := queryShareLinkInfo(token, \"dir\")\n\tif appErr != nil {\n\t\treturn appErr\n\t}\n\n\trepoID := info.RepoID\n\tparentDir := normalizeUTF8Path(info.ParentDir)\n\top := \"download-link\"\n\n\trepo := repomgr.Get(repoID)\n\tif repo == nil {\n\t\tmsg := \"Bad repo id\\n\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\tuser, _ := repomgr.GetRepoOwner(repoID)\n\n\tfilePath := r.URL.Query().Get(\"p\")\n\tif filePath == \"\" {\n\t\terr := r.ParseForm()\n\t\tif err != nil {\n\t\t\tmsg := \"Invalid form\\n\"\n\t\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t\t}\n\t\tparentDir := r.FormValue(\"parent_dir\")\n\t\tif parentDir == \"\" {\n\t\t\tmsg := \"Invalid parent_dir\\n\"\n\t\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t\t}\n\t\tparentDir = normalizeUTF8Path(parentDir)\n\t\tparentDir = getCanonPath(parentDir)\n\t\tdirents := r.FormValue(\"dirents\")\n\t\tif dirents == \"\" {\n\t\t\tmsg := \"Invalid dirents\\n\"\n\t\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t\t}\n\t\t// opStr:=r.FormVale(\"op\")\n\t\tlist, err := jsonToDirentList(repo, parentDir, dirents)\n\t\tif err != nil {\n\t\t\tlog.Warnf(\"failed to parse dirent list: %v\", err)\n\t\t\tmsg := \"Invalid dirents\\n\"\n\t\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t\t}\n\t\tif len(list) == 0 {\n\t\t\tmsg := \"Invalid dirents\\n\"\n\t\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t\t}\n\n\t\tobj := make(map[string]interface{})\n\t\tif len(list) == 1 {\n\t\t\tdent := list[0]\n\t\t\top = \"download-dir-link\"\n\t\t\tobj[\"dir_name\"] = dent.Name\n\t\t\tobj[\"obj_id\"] = dent.ID\n\t\t} else {\n\t\t\top = \"download-multi-link\"\n\t\t\tobj[\"parent_dir\"] = parentDir\n\t\t\tvar fileList []string\n\t\t\tfor _, dent := range list {\n\t\t\t\tfileList = append(fileList, dent.Name)\n\t\t\t}\n\t\t\tobj[\"file_list\"] = 
fileList\n\t\t}\n\t\tdata, err := json.Marshal(obj)\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"failed to encode zip obj: %v\", err)\n\t\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t\t}\n\t\tif err := downloadZipFile(rsp, r, string(data), repoID, user, op); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\n\t// file path is not empty string\n\tif _, ok := r.Header[\"If-Modified-Since\"]; ok {\n\t\treturn &appError{nil, \"\", http.StatusNotModified}\n\t}\n\n\tfilePath = normalizeUTF8Path(filePath)\n\tfullPath := filepath.Join(parentDir, filePath)\n\tfileName := filepath.Base(filePath)\n\n\tfileID, _, err := fsmgr.GetObjIDByPath(repo.StoreID, repo.RootID, fullPath)\n\tif err != nil {\n\t\tmsg := \"Invalid file_path\\n\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\trsp.Header().Set(\"ETag\", fileID)\n\n\tnow := time.Now()\n\trsp.Header().Set(\"Last-Modified\", now.Format(\"Mon, 2 Jan 2006 15:04:05 GMT\"))\n\trsp.Header().Set(\"Cache-Control\", \"max-age=3600\")\n\n\tranges := r.Header[\"Range\"]\n\tbyteRanges := strings.Join(ranges, \"\")\n\n\tvar cryptKey *seafileCrypt\n\tif repo.IsEncrypted {\n\t\tkey, err := parseCryptKey(rsp, repoID, user, repo.EncVersion)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcryptKey = key\n\t}\n\n\texists, _ := fsmgr.Exists(repo.StoreID, fileID)\n\tif !exists {\n\t\tmsg := \"Invalid file id\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\n\tif !repo.IsEncrypted && len(byteRanges) != 0 {\n\t\tif err := doFileRange(rsp, r, repo, fileID, fileName, op, byteRanges, user); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else if err := doFile(rsp, r, repo, fileID, fileName, op, cryptKey, user); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc jsonToDirentList(repo *repomgr.Repo, parentDir, dirents string) ([]*fsmgr.SeafDirent, error) {\n\tvar list []string\n\terr := json.Unmarshal([]byte(dirents), &list)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdir, err 
:= fsmgr.GetSeafdirByPath(repo.StoreID, repo.RootID, parentDir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdirentHash := make(map[string]*fsmgr.SeafDirent)\n\tfor _, dent := range dir.Entries {\n\t\tdirentHash[dent.Name] = dent\n\t}\n\n\tvar direntList []*fsmgr.SeafDirent\n\tfor _, path := range list {\n\t\tnormPath := normalizeUTF8Path(path)\n\t\tif normPath == \"\" || normPath == \"/\" {\n\t\t\treturn nil, fmt.Errorf(\"Invalid download file name: %s\\n\", normPath)\n\t\t}\n\t\tdent, ok := direntHash[normPath]\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"failed to get dient for %s in dir %s in repo %s\", normPath, parentDir, repo.StoreID)\n\t\t}\n\t\tdirentList = append(direntList, dent)\n\t}\n\n\treturn direntList, nil\n}\n*/\n\nfunc removeFileopExpireCache() {\n\tdeleteBlockMaps := func(key interface{}, value interface{}) bool {\n\t\tif blkMap, ok := value.(*blockMap); ok {\n\t\t\tif blkMap.expireTime <= time.Now().Unix() {\n\t\t\t\tblockMapCacheTable.Delete(key)\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\n\tblockMapCacheTable.Range(deleteBlockMaps)\n}\n"
  },
  {
    "path": "fileserver/fileserver.go",
    "content": "// Main package for Seafile file server.\npackage main\n\nimport (\n\t\"crypto/tls\"\n\t\"crypto/x509\"\n\t\"database/sql\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"os\"\n\t\"os/signal\"\n\t\"path/filepath\"\n\t\"runtime/debug\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com/go-sql-driver/mysql\"\n\t\"github.com/gorilla/mux\"\n\t\"github.com/haiwen/seafile-server/fileserver/blockmgr\"\n\t\"github.com/haiwen/seafile-server/fileserver/commitmgr\"\n\t\"github.com/haiwen/seafile-server/fileserver/fsmgr\"\n\t\"github.com/haiwen/seafile-server/fileserver/metrics\"\n\t\"github.com/haiwen/seafile-server/fileserver/option\"\n\t\"github.com/haiwen/seafile-server/fileserver/repomgr\"\n\t\"github.com/haiwen/seafile-server/fileserver/searpc\"\n\t\"github.com/haiwen/seafile-server/fileserver/share\"\n\t\"github.com/haiwen/seafile-server/fileserver/utils\"\n\tlog \"github.com/sirupsen/logrus\"\n\n\t\"net/http/pprof\"\n)\n\nvar dataDir, absDataDir string\nvar centralDir string\nvar logFile, absLogFile string\nvar rpcPipePath string\nvar pidFilePath string\nvar logFp *os.File\n\nvar seafileDB, ccnetDB *sql.DB\n\nvar logToStdout bool\n\nfunc init() {\n\tflag.StringVar(&centralDir, \"F\", \"\", \"central config directory\")\n\tflag.StringVar(&dataDir, \"d\", \"\", \"seafile data directory\")\n\tflag.StringVar(&logFile, \"l\", \"\", \"log file path\")\n\tflag.StringVar(&rpcPipePath, \"p\", \"\", \"rpc pipe path\")\n\tflag.StringVar(&pidFilePath, \"P\", \"\", \"pid file path\")\n\n\tenv := os.Getenv(\"SEAFILE_LOG_TO_STDOUT\")\n\tif env == \"true\" {\n\t\tlogToStdout = true\n\t}\n\n\tlog.SetFormatter(&LogFormatter{})\n}\n\nconst (\n\ttimestampFormat = \"[2006-01-02 15:04:05] \"\n)\n\ntype LogFormatter struct{}\n\nfunc (f *LogFormatter) Format(entry *log.Entry) ([]byte, error) {\n\tlevelStr := entry.Level.String()\n\tif levelStr == \"fatal\" {\n\t\tlevelStr = \"ERROR\"\n\t} else {\n\t\tlevelStr = strings.ToUpper(levelStr)\n\t}\n\tlevel := 
fmt.Sprintf(\"[%s] \", levelStr)\n\tappName := \"\"\n\tif logToStdout {\n\t\tappName = \"[fileserver] \"\n\t}\n\tbuf := make([]byte, 0, len(appName)+len(timestampFormat)+len(level)+len(entry.Message)+1)\n\tif logToStdout {\n\t\tbuf = append(buf, appName...)\n\t}\n\tbuf = entry.Time.AppendFormat(buf, timestampFormat)\n\tbuf = append(buf, level...)\n\tbuf = append(buf, entry.Message...)\n\tbuf = append(buf, '\\n')\n\treturn buf, nil\n}\n\nfunc loadCcnetDB() {\n\tdbOpt, err := option.LoadDBOption(centralDir)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to load database: %v\", err)\n\t}\n\n\tvar dsn string\n\ttimeout := \"&readTimeout=60s\" + \"&writeTimeout=60s\"\n\tif dbOpt.UseTLS && dbOpt.SkipVerify {\n\t\tdsn = fmt.Sprintf(\"%s:%s@tcp(%s:%d)/%s?tls=skip-verify%s\", dbOpt.User, dbOpt.Password, dbOpt.Host, dbOpt.Port, dbOpt.CcnetDbName, timeout)\n\t} else if dbOpt.UseTLS && !dbOpt.SkipVerify {\n\t\tregisterCA(dbOpt.CaPath)\n\t\tdsn = fmt.Sprintf(\"%s:%s@tcp(%s:%d)/%s?tls=custom%s\", dbOpt.User, dbOpt.Password, dbOpt.Host, dbOpt.Port, dbOpt.CcnetDbName, timeout)\n\t} else {\n\t\tdsn = fmt.Sprintf(\"%s:%s@tcp(%s:%d)/%s?tls=%t%s\", dbOpt.User, dbOpt.Password, dbOpt.Host, dbOpt.Port, dbOpt.CcnetDbName, dbOpt.UseTLS, timeout)\n\t}\n\tif dbOpt.Charset != \"\" {\n\t\tdsn = fmt.Sprintf(\"%s&charset=%s\", dsn, dbOpt.Charset)\n\t}\n\tccnetDB, err = sql.Open(\"mysql\", dsn)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to open database: %v\", err)\n\t}\n\tccnetDB.SetConnMaxLifetime(5 * time.Minute)\n\tccnetDB.SetMaxOpenConns(8)\n\tccnetDB.SetMaxIdleConns(8)\n}\n\n// registerCA registers CA to verify server cert.\nfunc registerCA(capath string) {\n\trootCertPool := x509.NewCertPool()\n\tpem, err := os.ReadFile(capath)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tif ok := rootCertPool.AppendCertsFromPEM(pem); !ok {\n\t\tlog.Fatal(\"Failed to append PEM.\")\n\t}\n\tmysql.RegisterTLSConfig(\"custom\", &tls.Config{\n\t\tRootCAs: rootCertPool,\n\t})\n}\n\nfunc loadSeafileDB() 
{\n\tdbOpt, err := option.LoadDBOption(centralDir)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to load database: %v\", err)\n\t}\n\n\tvar dsn string\n\ttimeout := \"&readTimeout=60s\" + \"&writeTimeout=60s\"\n\tif dbOpt.UseTLS && dbOpt.SkipVerify {\n\t\tdsn = fmt.Sprintf(\"%s:%s@tcp(%s:%d)/%s?tls=skip-verify%s\", dbOpt.User, dbOpt.Password, dbOpt.Host, dbOpt.Port, dbOpt.SeafileDbName, timeout)\n\t} else if dbOpt.UseTLS && !dbOpt.SkipVerify {\n\t\tregisterCA(dbOpt.CaPath)\n\t\tdsn = fmt.Sprintf(\"%s:%s@tcp(%s:%d)/%s?tls=custom%s\", dbOpt.User, dbOpt.Password, dbOpt.Host, dbOpt.Port, dbOpt.SeafileDbName, timeout)\n\t} else {\n\t\tdsn = fmt.Sprintf(\"%s:%s@tcp(%s:%d)/%s?tls=%t%s\", dbOpt.User, dbOpt.Password, dbOpt.Host, dbOpt.Port, dbOpt.SeafileDbName, dbOpt.UseTLS, timeout)\n\t}\n\tif dbOpt.Charset != \"\" {\n\t\tdsn = fmt.Sprintf(\"%s&charset=%s\", dsn, dbOpt.Charset)\n\t}\n\n\tseafileDB, err = sql.Open(\"mysql\", dsn)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to open database: %v\", err)\n\t}\n\tseafileDB.SetConnMaxLifetime(5 * time.Minute)\n\tseafileDB.SetMaxOpenConns(8)\n\tseafileDB.SetMaxIdleConns(8)\n}\n\nfunc writePidFile(pid_file_path string) error {\n\tfile, err := os.OpenFile(pid_file_path, os.O_CREATE|os.O_WRONLY, 0664)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\n\tpid := os.Getpid()\n\tstr := fmt.Sprintf(\"%d\", pid)\n\t_, err = file.Write([]byte(str))\n\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc removePidfile(pid_file_path string) error {\n\terr := os.Remove(pid_file_path)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tif centralDir == \"\" {\n\t\tlog.Fatal(\"central config directory must be specified.\")\n\t}\n\n\tif pidFilePath != \"\" {\n\t\tif writePidFile(pidFilePath) != nil {\n\t\t\tlog.Fatal(\"write pid file failed.\")\n\t\t}\n\t}\n\t_, err := os.Stat(centralDir)\n\tif os.IsNotExist(err) {\n\t\tlog.Fatalf(\"central config directory %s doesn't exist: 
%v.\", centralDir, err)\n\t}\n\n\tif dataDir == \"\" {\n\t\tlog.Fatal(\"seafile data directory must be specified.\")\n\t}\n\t_, err = os.Stat(dataDir)\n\tif os.IsNotExist(err) {\n\t\tlog.Fatalf(\"seafile data directory %s doesn't exist: %v.\", dataDir, err)\n\t}\n\tabsDataDir, err = filepath.Abs(dataDir)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to convert seafile data dir to absolute path: %v.\", err)\n\t}\n\n\tif logToStdout {\n\t\t// Use default output (StdOut)\n\t} else if logFile == \"\" {\n\t\tabsLogFile = filepath.Join(absDataDir, \"fileserver.log\")\n\t\tfp, err := os.OpenFile(absLogFile, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Failed to open or create log file: %v\", err)\n\t\t}\n\t\tlogFp = fp\n\t\tlog.SetOutput(fp)\n\t} else if logFile != \"-\" {\n\t\tabsLogFile, err = filepath.Abs(logFile)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Failed to convert log file path to absolute path: %v\", err)\n\t\t}\n\t\tfp, err := os.OpenFile(absLogFile, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Failed to open or create log file: %v\", err)\n\t\t}\n\t\tlogFp = fp\n\t\tlog.SetOutput(fp)\n\t}\n\n\tif absLogFile != \"\" && !logToStdout {\n\t\tutils.Dup(int(logFp.Fd()), int(os.Stderr.Fd()))\n\t}\n\t// When logFile is \"-\", use default output (StdOut)\n\n\tif err := option.LoadSeahubConfig(); err != nil {\n\t\tlog.Fatalf(\"Failed to read seahub config: %v\", err)\n\t}\n\n\toption.LoadFileServerOptions(centralDir)\n\tloadCcnetDB()\n\tloadSeafileDB()\n\n\tlevel, err := log.ParseLevel(option.LogLevel)\n\tif err != nil {\n\t\tlog.Info(\"use the default log level: info\")\n\t\tlog.SetLevel(log.InfoLevel)\n\t} else {\n\t\tlog.SetLevel(level)\n\t}\n\n\trepomgr.Init(seafileDB)\n\n\tfsmgr.Init(centralDir, dataDir, option.FsCacheLimit)\n\n\tblockmgr.Init(centralDir, dataDir)\n\n\tcommitmgr.Init(centralDir, dataDir)\n\n\tshare.Init(ccnetDB, seafileDB, option.GroupTableName, 
option.CloudMode)\n\n\trpcClientInit()\n\n\tfileopInit()\n\n\tsyncAPIInit()\n\n\tsizeSchedulerInit()\n\n\tvirtualRepoInit()\n\n\tinitUpload()\n\n\tmetrics.Init()\n\n\trouter := newHTTPRouter()\n\n\tgo handleSignals()\n\tgo handleUser1Signal()\n\n\tlog.Print(\"Seafile file server started.\")\n\n\tserver := new(http.Server)\n\tserver.Addr = fmt.Sprintf(\"%s:%d\", option.Host, option.Port)\n\tserver.Handler = router\n\n\terr = server.ListenAndServe()\n\tif err != nil {\n\t\tlog.Errorf(\"File server exiting: %v\", err)\n\t}\n}\n\nfunc handleSignals() {\n\tsignalChan := make(chan os.Signal, 1)\n\tsignal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM, os.Interrupt)\n\t<-signalChan\n\tmetrics.Stop()\n\tremovePidfile(pidFilePath)\n\tos.Exit(0)\n}\n\nfunc handleUser1Signal() {\n\tsignalChan := make(chan os.Signal, 1)\n\tsignal.Notify(signalChan, syscall.SIGUSR1)\n\n\tfor {\n\t\t<-signalChan\n\t\tlogRotate()\n\t}\n}\n\nfunc logRotate() {\n\tif logToStdout {\n\t\treturn\n\t}\n\t// reopen fileserver log\n\tfp, err := os.OpenFile(absLogFile, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to reopen fileserver log: %v\", err)\n\t}\n\tlog.SetOutput(fp)\n\tif logFp != nil {\n\t\tlogFp.Close()\n\t\tlogFp = fp\n\t}\n\n\tutils.Dup(int(logFp.Fd()), int(os.Stderr.Fd()))\n}\n\nvar rpcclient *searpc.Client\n\nfunc rpcClientInit() {\n\tvar pipePath string\n\tif rpcPipePath != \"\" {\n\t\tpipePath = filepath.Join(rpcPipePath, \"seafile.sock\")\n\t} else {\n\t\tpipePath = filepath.Join(absDataDir, \"seafile.sock\")\n\t}\n\trpcclient = searpc.Init(pipePath, \"seafserv-threaded-rpcserver\", 10)\n}\n\nfunc newHTTPRouter() *mux.Router {\n\tr := mux.NewRouter()\n\tr.HandleFunc(\"/protocol-version{slash:\\\\/?}\", handleProtocolVersion)\n\tr.Handle(\"/files/{.*}/{.*}\", appHandler(accessCB))\n\tr.Handle(\"/blks/{.*}/{.*}\", appHandler(accessBlksCB))\n\tr.Handle(\"/zip/{.*}\", appHandler(accessZipCB))\n\tr.Handle(\"/upload-api/{.*}\", 
appHandler(uploadAPICB))\n\tr.Handle(\"/upload-aj/{.*}\", appHandler(uploadAjaxCB))\n\tr.Handle(\"/update-api/{.*}\", appHandler(updateAPICB))\n\tr.Handle(\"/update-aj/{.*}\", appHandler(updateAjaxCB))\n\tr.Handle(\"/upload-blks-api/{.*}\", appHandler(uploadBlksAPICB))\n\tr.Handle(\"/upload-raw-blks-api/{.*}\", appHandler(uploadRawBlksAPICB))\n\n\t// links api\n\t//r.Handle(\"/u/{.*}\", appHandler(uploadLinkCB))\n\tr.Handle(\"/f/{.*}{slash:\\\\/?}\", appHandler(accessLinkCB))\n\t//r.Handle(\"/d/{.*}\", appHandler(accessDirLinkCB))\n\n\tr.Handle(\"/repos/{repoid:[\\\\da-z]{8}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{12}}/files/{filepath:.*}\", appHandler(accessV2CB))\n\n\t// file syncing api\n\tr.Handle(\"/repo/{repoid:[\\\\da-z]{8}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{12}}/permission-check{slash:\\\\/?}\",\n\t\tappHandler(permissionCheckCB))\n\tr.Handle(\"/repo/{repoid:[\\\\da-z]{8}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{12}}/commit/HEAD{slash:\\\\/?}\",\n\t\tappHandler(headCommitOperCB))\n\tr.Handle(\"/repo/{repoid:[\\\\da-z]{8}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{12}}/commit/{id:[\\\\da-z]{40}}\",\n\t\tappHandler(commitOperCB))\n\tr.Handle(\"/repo/{repoid:[\\\\da-z]{8}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{12}}/block/{id:[\\\\da-z]{40}}\",\n\t\tappHandler(blockOperCB))\n\tr.Handle(\"/repo/{repoid:[\\\\da-z]{8}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{12}}/fs-id-list{slash:\\\\/?}\",\n\t\tappHandler(getFsObjIDCB))\n\tr.Handle(\"/repo/head-commits-multi{slash:\\\\/?}\",\n\t\tappHandler(headCommitsMultiCB))\n\tr.Handle(\"/repo/{repoid:[\\\\da-z]{8}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{12}}/pack-fs{slash:\\\\/?}\",\n\t\tappHandler(packFSCB))\n\tr.Handle(\"/repo/{repoid:[\\\\da-z]{8}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{12}}/check-fs{slash:\\\\/?}\",\n\t\tappHandler(checkFSCB))\n\tr.Handle(\"/repo/{repoid:[\\\\da-z]{8}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\d
a-z]{4}-[\\\\da-z]{12}}/check-blocks{slash:\\\\/?}\",\n\t\tappHandler(checkBlockCB))\n\tr.Handle(\"/repo/{repoid:[\\\\da-z]{8}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{12}}/recv-fs{slash:\\\\/?}\",\n\t\tappHandler(recvFSCB))\n\tr.Handle(\"/repo/{repoid:[\\\\da-z]{8}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{12}}/quota-check{slash:\\\\/?}\",\n\t\tappHandler(getCheckQuotaCB))\n\tr.Handle(\"/repo/{repoid:[\\\\da-z]{8}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{12}}/jwt-token{slash:\\\\/?}\",\n\t\tappHandler(getJWTTokenCB))\n\n\t// seadrive api\n\tr.Handle(\"/repo/{repoid:[\\\\da-z]{8}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{12}}/block-map/{id:[\\\\da-z]{40}}\",\n\t\tappHandler(getBlockMapCB))\n\tr.Handle(\"/accessible-repos{slash:\\\\/?}\", appHandler(getAccessibleRepoListCB))\n\n\t// pprof\n\tr.Handle(\"/debug/pprof\", &profileHandler{http.HandlerFunc(pprof.Index)})\n\tr.Handle(\"/debug/pprof/cmdline\", &profileHandler{http.HandlerFunc(pprof.Cmdline)})\n\tr.Handle(\"/debug/pprof/profile\", &profileHandler{http.HandlerFunc(pprof.Profile)})\n\tr.Handle(\"/debug/pprof/symbol\", &profileHandler{http.HandlerFunc(pprof.Symbol)})\n\tr.Handle(\"/debug/pprof/heap\", &profileHandler{pprof.Handler(\"heap\")})\n\tr.Handle(\"/debug/pprof/block\", &profileHandler{pprof.Handler(\"block\")})\n\tr.Handle(\"/debug/pprof/goroutine\", &profileHandler{pprof.Handler(\"goroutine\")})\n\tr.Handle(\"/debug/pprof/threadcreate\", &profileHandler{pprof.Handler(\"threadcreate\")})\n\tr.Handle(\"/debug/pprof/trace\", &traceHandler{})\n\n\tif option.HasRedisOptions {\n\t\tr.Use(metrics.MetricMiddleware)\n\t}\n\treturn r\n}\n\nfunc handleProtocolVersion(rsp http.ResponseWriter, r *http.Request) {\n\tio.WriteString(rsp, \"{\\\"version\\\": 2}\")\n}\n\ntype appError struct {\n\tError   error\n\tMessage string\n\tCode    int\n}\n\ntype appHandler func(http.ResponseWriter, *http.Request) *appError\n\nfunc (fn appHandler) ServeHTTP(w http.ResponseWriter, r 
*http.Request) {\n\tif e := fn(w, r); e != nil {\n\t\tif e.Error != nil && e.Code == http.StatusInternalServerError {\n\t\t\tlog.Errorf(\"path %s internal server error: %v\\n\", r.URL.Path, e.Error)\n\t\t}\n\t\thttp.Error(w, e.Message, e.Code)\n\t}\n}\n\nfunc RecoverWrapper(f func()) {\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\tlog.Errorf(\"panic: %v\\n%s\", err, debug.Stack())\n\t\t}\n\t}()\n\n\tf()\n}\n\ntype profileHandler struct {\n\tpHandler http.Handler\n}\n\nfunc (p *profileHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tqueries := r.URL.Query()\n\tpassword := queries.Get(\"password\")\n\tif !option.EnableProfiling || password != option.ProfilePassword {\n\t\thttp.Error(w, \"\", http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\tp.pHandler.ServeHTTP(w, r)\n}\n\ntype traceHandler struct {\n}\n\nfunc (p *traceHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tqueries := r.URL.Query()\n\tpassword := queries.Get(\"password\")\n\tif !option.EnableProfiling || password != option.ProfilePassword {\n\t\thttp.Error(w, \"\", http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\tpprof.Trace(w, r)\n}\n"
  },
  {
    "path": "fileserver/fsmgr/fsmgr.go",
    "content": "// Package fsmgr manages fs objects\npackage fsmgr\n\nimport (\n\t\"bytes\"\n\t\"compress/zlib\"\n\t\"crypto/sha1\"\n\t\"encoding/hex\"\n\t\"fmt\"\n\t\"io\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\t\"unsafe\"\n\n\t\"github.com/haiwen/seafile-server/fileserver/objstore\"\n\t\"github.com/haiwen/seafile-server/fileserver/utils\"\n\tjsoniter \"github.com/json-iterator/go\"\n\n\t\"github.com/dgraph-io/ristretto\"\n)\n\nvar json = jsoniter.ConfigCompatibleWithStandardLibrary\n\n// Seafile is a file object\ntype Seafile struct {\n\tdata     []byte\n\tVersion  int      `json:\"version\"`\n\tFileType int      `json:\"type\"`\n\tFileID   string   `json:\"-\"`\n\tFileSize uint64   `json:\"size\"`\n\tBlkIDs   []string `json:\"block_ids\"`\n}\n\n// In the JSON encoding generated by C language, there are spaces after the ',' and ':', and the order of the fields is sorted by the key.\n// So it is not compatible with the json library generated by go.\nfunc (file *Seafile) toJSON() ([]byte, error) {\n\tvar buf bytes.Buffer\n\tbuf.WriteByte('{')\n\tbuf.WriteString(\"\\\"block_ids\\\": [\")\n\tfor i, blkID := range file.BlkIDs {\n\t\tdata, err := json.Marshal(blkID)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tbuf.Write(data)\n\t\tif i < len(file.BlkIDs)-1 {\n\t\t\tbuf.WriteByte(',')\n\t\t\tbuf.WriteByte(' ')\n\t\t}\n\t}\n\tbuf.WriteByte(']')\n\tbuf.WriteByte(',')\n\tbuf.WriteByte(' ')\n\n\tdata, err := json.Marshal(file.FileSize)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\twriteField(&buf, \"\\\"size\\\"\", data)\n\tbuf.WriteByte(',')\n\tbuf.WriteByte(' ')\n\n\tdata, err = json.Marshal(SeafMetadataTypeFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\twriteField(&buf, \"\\\"type\\\"\", data)\n\tbuf.WriteByte(',')\n\tbuf.WriteByte(' ')\n\n\tdata, err = json.Marshal(file.Version)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\twriteField(&buf, \"\\\"version\\\"\", data)\n\n\tbuf.WriteByte('}')\n\n\treturn 
buf.Bytes(), nil\n}\n\nfunc writeField(buf *bytes.Buffer, key string, value []byte) {\n\tbuf.WriteString(key)\n\tbuf.WriteByte(':')\n\tbuf.WriteByte(' ')\n\tbuf.Write(value)\n}\n\n// SeafDirent is a dir entry object\ntype SeafDirent struct {\n\tMode     uint32 `json:\"mode\"`\n\tID       string `json:\"id\"`\n\tName     string `json:\"name\"`\n\tMtime    int64  `json:\"mtime\"`\n\tModifier string `json:\"modifier\"`\n\tSize     int64  `json:\"size\"`\n}\n\nfunc (dent *SeafDirent) toJSON() ([]byte, error) {\n\tvar buf bytes.Buffer\n\tbuf.WriteByte('{')\n\tdata, err := json.Marshal(dent.ID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\twriteField(&buf, \"\\\"id\\\"\", data)\n\tbuf.WriteByte(',')\n\tbuf.WriteByte(' ')\n\n\tdata, err = json.Marshal(dent.Mode)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\twriteField(&buf, \"\\\"mode\\\"\", data)\n\tbuf.WriteByte(',')\n\tbuf.WriteByte(' ')\n\n\tif IsRegular(dent.Mode) {\n\t\tdata, err = jsonNoEscape(dent.Modifier)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\twriteField(&buf, \"\\\"modifier\\\"\", data)\n\t\tbuf.WriteByte(',')\n\t\tbuf.WriteByte(' ')\n\t}\n\n\tdata, err = json.Marshal(dent.Mtime)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\twriteField(&buf, \"\\\"mtime\\\"\", data)\n\tbuf.WriteByte(',')\n\tbuf.WriteByte(' ')\n\n\tdata, err = jsonNoEscape(dent.Name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\twriteField(&buf, \"\\\"name\\\"\", data)\n\n\tif IsRegular(dent.Mode) {\n\t\tbuf.WriteByte(',')\n\t\tbuf.WriteByte(' ')\n\t\tdata, err = json.Marshal(dent.Size)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\twriteField(&buf, \"\\\"size\\\"\", data)\n\t}\n\tbuf.WriteByte('}')\n\n\treturn buf.Bytes(), nil\n}\n\n// In golang json, the string is encoded using HTMLEscape, which replaces \"<\", \">\", \"&\", U+2028, and U+2029 are escaped to \"\\u003c\",\"\\u003e\", \"\\u0026\", \"\\u2028\", and \"\\u2029\".\n// So it is not compatible with the json library generated by c. 
This replacement can be disabled when using an Encoder, by calling SetEscapeHTML(false).\nfunc jsonNoEscape(data interface{}) ([]byte, error) {\n\tvar buf bytes.Buffer\n\n\tencoder := json.NewEncoder(&buf)\n\tencoder.SetEscapeHTML(false)\n\n\tif err := encoder.Encode(data); err != nil {\n\t\treturn nil, err\n\t}\n\n\tbytes := buf.Bytes()\n\n\t// Encode will terminate each value with a newline.\n\t// This makes the output look a little nicer\n\t// when debugging, and some kind of space\n\t// is required if the encoded value was a number,\n\t// so that the reader knows there aren't more\n\t// digits coming.\n\t// The newline at the end needs to be removed for the above reasons.\n\treturn bytes[:len(bytes)-1], nil\n}\n\n// SeafDir is a dir object\ntype SeafDir struct {\n\tdata    []byte\n\tVersion int           `json:\"version\"`\n\tDirType int           `json:\"type\"`\n\tDirID   string        `json:\"-\"`\n\tEntries []*SeafDirent `json:\"dirents\"`\n}\n\nfunc (dir *SeafDir) toJSON() ([]byte, error) {\n\tvar buf bytes.Buffer\n\tbuf.WriteByte('{')\n\tbuf.WriteString(\"\\\"dirents\\\": [\")\n\tfor i, entry := range dir.Entries {\n\t\tdata, err := entry.toJSON()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tbuf.Write(data)\n\t\tif i < len(dir.Entries)-1 {\n\t\t\tbuf.WriteByte(',')\n\t\t\tbuf.WriteByte(' ')\n\t\t}\n\t}\n\tbuf.WriteByte(']')\n\tbuf.WriteByte(',')\n\tbuf.WriteByte(' ')\n\n\tdata, err := json.Marshal(SeafMetadataTypeDir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\twriteField(&buf, \"\\\"type\\\"\", data)\n\tbuf.WriteByte(',')\n\tbuf.WriteByte(' ')\n\n\tdata, err = json.Marshal(dir.Version)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\twriteField(&buf, \"\\\"version\\\"\", data)\n\n\tbuf.WriteByte('}')\n\n\treturn buf.Bytes(), nil\n}\n\n// FileCountInfo contains information of files\ntype FileCountInfo struct {\n\tFileCount int64\n\tSize      int64\n\tDirCount  int64\n}\n\n// Meta data type of dir or file\nconst (\n\tSeafMetadataTypeInvalid = 
iota\n\tSeafMetadataTypeFile\n\tSeafMetadataTypeLink\n\tSeafMetadataTypeDir\n)\n\nvar store *objstore.ObjectStore\n\n// Empty value of sha1\nconst (\n\tEmptySha1 = \"0000000000000000000000000000000000000000\"\n)\n\n// Since zlib library allocates a large amount of memory every time a new reader is created, when the number of calls is too large,\n// the GC will be executed frequently, resulting in high CPU usage.\nvar zlibReaders []io.ReadCloser\nvar zlibLock sync.Mutex\n\n// Add fs cache, on the one hand to avoid repeated creation and destruction of repeatedly accessed objects,\n// on the other hand it will also slow down the speed at which objects are released.\nvar fsCache *ristretto.Cache\n\n// Init initializes fs manager and creates underlying object store.\nfunc Init(seafileConfPath string, seafileDataDir string, fsCacheLimit int64) {\n\tstore = objstore.New(seafileConfPath, seafileDataDir, \"fs\")\n\tfsCache, _ = ristretto.NewCache(&ristretto.Config{\n\t\tNumCounters: 1e7,          // number of keys to track frequency of (10M).\n\t\tMaxCost:     fsCacheLimit, // maximum cost of cache.\n\t\tBufferItems: 64,           // number of keys per Get buffer.\n\t\tCost:        calCost,\n\t})\n}\n\nfunc calCost(value interface{}) int64 {\n\treturn sizeOf(value)\n}\n\nconst (\n\tsizeOfString     = int64(unsafe.Sizeof(string(\"\")))\n\tsizeOfPointer    = int64(unsafe.Sizeof(uintptr(0)))\n\tsizeOfSeafile    = int64(unsafe.Sizeof(Seafile{}))\n\tsizeOfSeafDir    = int64(unsafe.Sizeof(SeafDir{}))\n\tsizeOfSeafDirent = int64(unsafe.Sizeof(SeafDirent{}))\n)\n\nfunc sizeOf(a interface{}) int64 {\n\tvar size int64\n\tswitch x := a.(type) {\n\tcase string:\n\t\treturn sizeOfString + int64(len(x))\n\tcase []string:\n\t\tfor _, s := range x {\n\t\t\tsize += sizeOf(s)\n\t\t}\n\t\treturn size\n\tcase *Seafile:\n\t\tsize = sizeOfPointer\n\t\tsize += sizeOfSeafile\n\t\tsize += int64(len(x.FileID))\n\t\tsize += sizeOf(x.BlkIDs)\n\t\treturn size\n\tcase *SeafDir:\n\t\tsize = 
sizeOfPointer\n\t\tsize += sizeOfSeafDir\n\t\tsize += int64(len(x.DirID))\n\t\tfor _, dent := range x.Entries {\n\t\t\tsize += sizeOf(dent)\n\t\t}\n\t\treturn size\n\tcase *SeafDirent:\n\t\tsize = sizeOfPointer\n\t\tsize += sizeOfSeafDirent\n\t\tsize += int64(len(x.ID))\n\t\tsize += int64(len(x.Name))\n\t\tsize += int64(len(x.Modifier))\n\t\treturn size\n\n\t}\n\treturn 0\n}\n\nfunc initZlibReader() (io.ReadCloser, error) {\n\tvar buf bytes.Buffer\n\n\t// Since the corresponding reader has not been obtained when zlib is initialized,\n\t// an io.Reader needs to be built to initialize zlib.\n\tw := zlib.NewWriter(&buf)\n\tw.Close()\n\n\tr, err := zlib.NewReader(&buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn r, nil\n}\n\n// GetOneZlibReader gets a zlib reader from zlibReaders.\nfunc GetOneZlibReader() io.ReadCloser {\n\tzlibLock.Lock()\n\tdefer zlibLock.Unlock()\n\tvar reader io.ReadCloser\n\tif len(zlibReaders) == 0 {\n\t\treader, err := initZlibReader()\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn reader\n\t}\n\treader = zlibReaders[0]\n\tzlibReaders = zlibReaders[1:]\n\n\treturn reader\n}\n\nfunc ReturnOneZlibReader(reader io.ReadCloser) {\n\tif reader == nil {\n\t\treturn\n\t}\n\tzlibLock.Lock()\n\tdefer zlibLock.Unlock()\n\tzlibReaders = append(zlibReaders, reader)\n}\n\n// NewDirent initializes a SeafDirent object\nfunc NewDirent(id string, name string, mode uint32, mtime int64, modifier string, size int64) *SeafDirent {\n\tdent := new(SeafDirent)\n\tdent.ID = id\n\tif id == \"\" {\n\t\tdent.ID = EmptySha1\n\t}\n\tdent.Name = name\n\tdent.Mode = mode\n\tdent.Mtime = mtime\n\tif IsRegular(mode) {\n\t\tdent.Modifier = modifier\n\t\tdent.Size = size\n\t}\n\n\treturn dent\n}\n\n// NewSeafdir initializes a SeafDir object\nfunc NewSeafdir(version int, entries []*SeafDirent) (*SeafDir, error) {\n\tdir := new(SeafDir)\n\tdir.Version = version\n\tdir.Entries = entries\n\tif len(entries) == 0 {\n\t\tdir.DirID = EmptySha1\n\t\treturn dir, 
nil\n\t}\n\tjsonstr, err := dir.toJSON()\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to convert seafdir to json\")\n\t\treturn nil, err\n\t}\n\tdir.data = jsonstr\n\tchecksum := sha1.Sum(jsonstr)\n\tdir.DirID = hex.EncodeToString(checksum[:])\n\n\treturn dir, nil\n}\n\n// NewSeafile initializes a Seafile object\nfunc NewSeafile(version int, fileSize int64, blkIDs []string) (*Seafile, error) {\n\tseafile := new(Seafile)\n\tseafile.Version = version\n\tseafile.FileSize = uint64(fileSize)\n\tseafile.BlkIDs = blkIDs\n\tif len(blkIDs) == 0 {\n\t\tseafile.FileID = EmptySha1\n\t\treturn seafile, nil\n\t}\n\n\tjsonstr, err := seafile.toJSON()\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to convert seafile to json\")\n\t\treturn nil, err\n\t}\n\tseafile.data = jsonstr\n\tcheckSum := sha1.Sum(jsonstr)\n\tseafile.FileID = hex.EncodeToString(checkSum[:])\n\n\treturn seafile, nil\n}\n\nfunc uncompress(p []byte, reader io.ReadCloser) ([]byte, error) {\n\tb := bytes.NewReader(p)\n\tvar out bytes.Buffer\n\n\tif reader == nil {\n\t\tr, err := zlib.NewReader(b)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t_, err = io.Copy(&out, r)\n\t\tif err != nil {\n\t\t\tr.Close()\n\t\t\treturn nil, err\n\t\t}\n\n\t\tr.Close()\n\t\treturn out.Bytes(), nil\n\t}\n\n\t// resue the old zlib reader.\n\tresetter, _ := reader.(zlib.Resetter)\n\terr := resetter.Reset(b, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t_, err = io.Copy(&out, reader)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn out.Bytes(), nil\n}\n\nfunc compress(p []byte) ([]byte, error) {\n\tvar out bytes.Buffer\n\tw := zlib.NewWriter(&out)\n\n\t_, err := w.Write(p)\n\tif err != nil {\n\t\tw.Close()\n\t\treturn nil, err\n\t}\n\n\tw.Close()\n\n\treturn out.Bytes(), nil\n}\n\n// FromData reads from p and converts JSON-encoded data to Seafile.\nfunc (seafile *Seafile) FromData(p []byte, reader io.ReadCloser) error {\n\tb, err := uncompress(p, reader)\n\tif err != nil {\n\t\treturn 
err\n\t}\n\terr = json.Unmarshal(b, seafile)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif seafile.FileType != SeafMetadataTypeFile {\n\t\treturn fmt.Errorf(\"object %s is not a file\", seafile.FileID)\n\t}\n\tif seafile.Version < 1 {\n\t\treturn fmt.Errorf(\"seafile object %s version should be > 0, version is %d\", seafile.FileID, seafile.Version)\n\t}\n\tif seafile.BlkIDs == nil {\n\t\treturn fmt.Errorf(\"no block id array in seafile object %s\", seafile.FileID)\n\t}\n\tfor _, blkID := range seafile.BlkIDs {\n\t\tif !utils.IsObjectIDValid(blkID) {\n\t\t\treturn fmt.Errorf(\"block id %s is invalid\", blkID)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// ToData converts seafile to JSON-encoded data and writes to w.\nfunc (seafile *Seafile) ToData(w io.Writer) error {\n\tbuf, err := compress(seafile.data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = w.Write(buf)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// ToData converts seafdir to JSON-encoded data and writes to w.\nfunc (seafdir *SeafDir) ToData(w io.Writer) error {\n\tbuf, err := compress(seafdir.data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = w.Write(buf)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// FromData reads from p and converts JSON-encoded data to SeafDir.\nfunc (seafdir *SeafDir) FromData(p []byte, reader io.ReadCloser) error {\n\tb, err := uncompress(p, reader)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = json.Unmarshal(b, seafdir)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif seafdir.DirType != SeafMetadataTypeDir {\n\t\treturn fmt.Errorf(\"object %s is not a dir\", seafdir.DirID)\n\t}\n\tif seafdir.Version < 1 {\n\t\treturn fmt.Errorf(\"dir object %s version should be > 0, version is %d\", seafdir.DirID, seafdir.Version)\n\t}\n\tif seafdir.Entries == nil {\n\t\treturn fmt.Errorf(\"no dirents in dir object %s\", seafdir.DirID)\n\t}\n\tfor _, dent := range seafdir.Entries {\n\t\tif !utils.IsObjectIDValid(dent.ID) {\n\t\t\treturn 
fmt.Errorf(\"dirent id %s is invalid\", dent.ID)\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// ReadRaw reads data in binary format from storage backend.\nfunc ReadRaw(repoID string, objID string, w io.Writer) error {\n\terr := store.Read(repoID, objID, w)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// WriteRaw writes data in binary format to storage backend.\nfunc WriteRaw(repoID string, objID string, r io.Reader) error {\n\terr := store.Write(repoID, objID, r, false)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// GetSeafile gets seafile from storage backend.\nfunc GetSeafile(repoID string, fileID string) (*Seafile, error) {\n\treturn getSeafile(repoID, fileID, nil)\n}\n\n// GetSeafileWithZlibReader gets seafile from storage backend with a zlib reader.\nfunc GetSeafileWithZlibReader(repoID string, fileID string, reader io.ReadCloser) (*Seafile, error) {\n\treturn getSeafile(repoID, fileID, reader)\n}\n\nfunc getSeafile(repoID string, fileID string, reader io.ReadCloser) (*Seafile, error) {\n\n\tvar buf bytes.Buffer\n\tseafile := new(Seafile)\n\tif fileID == EmptySha1 {\n\t\tseafile.FileID = EmptySha1\n\t\treturn seafile, nil\n\t}\n\n\tseafile.FileID = fileID\n\n\terr := ReadRaw(repoID, fileID, &buf)\n\tif err != nil {\n\t\terrors := fmt.Errorf(\"failed to read seafile object from storage : %v\", err)\n\t\treturn nil, errors\n\t}\n\n\terr = seafile.FromData(buf.Bytes(), reader)\n\tif err != nil {\n\t\terrors := fmt.Errorf(\"failed to parse seafile object %s/%s : %v\", repoID, fileID, err)\n\t\treturn nil, errors\n\t}\n\n\tif seafile.Version < 1 {\n\t\terrors := fmt.Errorf(\"seafile object %s/%s version should be > 0\", repoID, fileID)\n\t\treturn nil, errors\n\t}\n\n\treturn seafile, nil\n}\n\n// SaveSeafile saves seafile to storage backend.\nfunc SaveSeafile(repoID string, seafile *Seafile) error {\n\tfileID := seafile.FileID\n\tif fileID == EmptySha1 {\n\t\treturn nil\n\t}\n\n\texist, _ := store.Exists(repoID, fileID)\n\tif exist 
{\n\t\treturn nil\n\t}\n\n\tseafile.FileType = SeafMetadataTypeFile\n\tvar buf bytes.Buffer\n\terr := seafile.ToData(&buf)\n\tif err != nil {\n\t\terrors := fmt.Errorf(\"failed to convert seafile object %s/%s to json\", repoID, fileID)\n\t\treturn errors\n\t}\n\n\terr = WriteRaw(repoID, fileID, &buf)\n\tif err != nil {\n\t\terrors := fmt.Errorf(\"failed to write seafile object to storage : %v\", err)\n\t\treturn errors\n\t}\n\n\treturn nil\n}\n\n// GetSeafdir gets seafdir from storage backend.\nfunc GetSeafdir(repoID string, dirID string) (*SeafDir, error) {\n\treturn getSeafdir(repoID, dirID, nil, false)\n}\n\n// GetSeafdir gets seafdir from storage backend with a zlib reader.\nfunc GetSeafdirWithZlibReader(repoID string, dirID string, reader io.ReadCloser) (*SeafDir, error) {\n\treturn getSeafdir(repoID, dirID, reader, true)\n}\n\nfunc getSeafdir(repoID string, dirID string, reader io.ReadCloser, useCache bool) (*SeafDir, error) {\n\tvar seafdir *SeafDir\n\tif useCache {\n\t\tseafdir = getSeafdirFromCache(repoID, dirID)\n\t\tif seafdir != nil {\n\t\t\treturn seafdir, nil\n\t\t}\n\t}\n\tvar buf bytes.Buffer\n\tseafdir = new(SeafDir)\n\tif dirID == EmptySha1 {\n\t\tseafdir.DirID = EmptySha1\n\t\treturn seafdir, nil\n\t}\n\n\tseafdir.DirID = dirID\n\n\terr := ReadRaw(repoID, dirID, &buf)\n\tif err != nil {\n\t\terrors := fmt.Errorf(\"failed to read seafdir object from storage : %v\", err)\n\t\treturn nil, errors\n\t}\n\n\terr = seafdir.FromData(buf.Bytes(), reader)\n\tif err != nil {\n\t\terrors := fmt.Errorf(\"failed to parse seafdir object %s/%s : %v\", repoID, dirID, err)\n\t\treturn nil, errors\n\t}\n\n\tif seafdir.Version < 1 {\n\t\terrors := fmt.Errorf(\"seadir object %s/%s version should be > 0\", repoID, dirID)\n\t\treturn nil, errors\n\t}\n\n\tif useCache {\n\t\tsetSeafdirToCache(repoID, seafdir)\n\t}\n\n\treturn seafdir, nil\n}\n\nfunc getSeafdirFromCache(repoID string, dirID string) *SeafDir {\n\tkey := repoID + dirID\n\tv, ok := fsCache.Get(key)\n\tif 
!ok {\n\t\treturn nil\n\t}\n\tseafdir, ok := v.(*SeafDir)\n\tif ok {\n\t\treturn seafdir\n\t}\n\n\treturn nil\n}\n\nfunc setSeafdirToCache(repoID string, seafdir *SeafDir) error {\n\tkey := repoID + seafdir.DirID\n\tfsCache.SetWithTTL(key, seafdir, 0, time.Duration(1*time.Hour))\n\n\treturn nil\n}\n\n// SaveSeafdir saves seafdir to storage backend.\nfunc SaveSeafdir(repoID string, seafdir *SeafDir) error {\n\tdirID := seafdir.DirID\n\tif dirID == EmptySha1 {\n\t\treturn nil\n\t}\n\texist, _ := store.Exists(repoID, dirID)\n\tif exist {\n\t\treturn nil\n\t}\n\n\tseafdir.DirType = SeafMetadataTypeDir\n\tvar buf bytes.Buffer\n\terr := seafdir.ToData(&buf)\n\tif err != nil {\n\t\terrors := fmt.Errorf(\"failed to convert seafdir object %s/%s to json\", repoID, dirID)\n\t\treturn errors\n\t}\n\n\terr = WriteRaw(repoID, dirID, &buf)\n\tif err != nil {\n\t\terrors := fmt.Errorf(\"failed to write seafdir object to storage : %v\", err)\n\t\treturn errors\n\t}\n\n\treturn nil\n}\n\n// Exists check if fs object is exists.\nfunc Exists(repoID string, objID string) (bool, error) {\n\tif objID == EmptySha1 {\n\t\treturn true, nil\n\t}\n\treturn store.Exists(repoID, objID)\n}\n\nfunc comp(c rune) bool {\n\treturn c == '/'\n}\n\n// IsDir check if the mode is dir.\nfunc IsDir(m uint32) bool {\n\treturn (m & syscall.S_IFMT) == syscall.S_IFDIR\n}\n\n// IsRegular Check if the mode is regular.\nfunc IsRegular(m uint32) bool {\n\treturn (m & syscall.S_IFMT) == syscall.S_IFREG\n}\n\n// ErrPathNoExist is an error indicating that the file does not exist\nvar ErrPathNoExist = fmt.Errorf(\"path does not exist\")\n\n// GetSeafdirByPath gets the object of seafdir by path.\nfunc GetSeafdirByPath(repoID string, rootID string, path string) (*SeafDir, error) {\n\tdir, err := GetSeafdir(repoID, rootID)\n\tif err != nil {\n\t\terrors := fmt.Errorf(\"directory is missing\")\n\t\treturn nil, errors\n\t}\n\n\tpath = filepath.Join(\"/\", path)\n\tparts := strings.FieldsFunc(path, comp)\n\tvar dirID 
string\n\tfor _, name := range parts {\n\t\tentries := dir.Entries\n\t\tfor _, v := range entries {\n\t\t\tif v.Name == name && IsDir(v.Mode) {\n\t\t\t\tdirID = v.ID\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif dirID == `` {\n\t\t\treturn nil, ErrPathNoExist\n\t\t}\n\n\t\tdir, err = GetSeafdir(repoID, dirID)\n\t\tif err != nil {\n\t\t\terrors := fmt.Errorf(\"directory is missing\")\n\t\t\treturn nil, errors\n\t\t}\n\t}\n\n\treturn dir, nil\n}\n\n// GetSeafdirIDByPath gets the dirID of SeafDir by path.\nfunc GetSeafdirIDByPath(repoID, rootID, path string) (string, error) {\n\tdirID, mode, err := GetObjIDByPath(repoID, rootID, path)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to get dir id by path: %s: %w\", path, err)\n\t\treturn \"\", err\n\t}\n\tif dirID == \"\" || !IsDir(mode) {\n\t\treturn \"\", nil\n\t}\n\n\treturn dirID, nil\n}\n\n// GetObjIDByPath gets the obj id by path\nfunc GetObjIDByPath(repoID, rootID, path string) (string, uint32, error) {\n\tvar name string\n\tvar baseDir *SeafDir\n\tformatPath := filepath.Join(path)\n\tif len(formatPath) == 0 || formatPath == \"/\" {\n\t\treturn rootID, syscall.S_IFDIR, nil\n\t}\n\tindex := strings.Index(formatPath, \"/\")\n\tif index < 0 {\n\t\tdir, err := GetSeafdir(repoID, rootID)\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"failed to find root dir %s: %v\", rootID, err)\n\t\t\treturn \"\", 0, err\n\t\t}\n\t\tname = formatPath\n\t\tbaseDir = dir\n\t} else {\n\t\tname = filepath.Base(formatPath)\n\t\tdirName := filepath.Dir(formatPath)\n\t\tdir, err := GetSeafdirByPath(repoID, rootID, dirName)\n\t\tif err != nil {\n\t\t\tif err == ErrPathNoExist {\n\t\t\t\treturn \"\", syscall.S_IFDIR, ErrPathNoExist\n\t\t\t}\n\t\t\terr := fmt.Errorf(\"failed to find dir %s in repo %s: %v\", dirName, repoID, err)\n\t\t\treturn \"\", syscall.S_IFDIR, err\n\t\t}\n\t\tbaseDir = dir\n\t}\n\n\tentries := baseDir.Entries\n\tfor _, de := range entries {\n\t\tif de.Name == name {\n\t\t\treturn de.ID, de.Mode, 
nil\n\t\t}\n\t}\n\n\treturn \"\", 0, nil\n\n}\n\n// GetFileCountInfoByPath gets the count info of file by path.\nfunc GetFileCountInfoByPath(repoID, rootID, path string) (*FileCountInfo, error) {\n\tdirID, err := GetSeafdirIDByPath(repoID, rootID, path)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to get file count info for repo %s path %s: %v\", repoID, path, err)\n\t\treturn nil, err\n\t}\n\n\tinfo, err := getFileCountInfo(repoID, dirID)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to get file count in repo %s: %v\", repoID, err)\n\t\treturn nil, err\n\t}\n\n\treturn info, nil\n}\n\nfunc getFileCountInfo(repoID, dirID string) (*FileCountInfo, error) {\n\tdir, err := GetSeafdir(repoID, dirID)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to get dir: %v\", err)\n\t\treturn nil, err\n\t}\n\n\tinfo := new(FileCountInfo)\n\n\tentries := dir.Entries\n\tfor _, de := range entries {\n\t\tif IsDir(de.Mode) {\n\t\t\ttmpInfo, err := getFileCountInfo(repoID, de.ID)\n\t\t\tif err != nil {\n\t\t\t\terr := fmt.Errorf(\"failed to get file count: %v\", err)\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tinfo.DirCount = tmpInfo.DirCount + 1\n\t\t\tinfo.FileCount += tmpInfo.FileCount\n\t\t\tinfo.Size += tmpInfo.Size\n\t\t} else {\n\t\t\tinfo.FileCount++\n\t\t\tinfo.Size += de.Size\n\t\t}\n\t}\n\n\treturn info, nil\n}\n\nfunc GetDirentByPath(repoID, rootID, rpath string) (*SeafDirent, error) {\n\tparentDir := filepath.Dir(rpath)\n\tfileName := filepath.Base(rpath)\n\n\tvar dir *SeafDir\n\tvar err error\n\n\tif parentDir == \".\" {\n\t\tdir, err = GetSeafdir(repoID, rootID)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tdir, err = GetSeafdirByPath(repoID, rootID, parentDir)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tfor _, de := range dir.Entries {\n\t\tif de.Name == fileName {\n\t\t\treturn de, nil\n\t\t}\n\t}\n\n\treturn nil, ErrPathNoExist\n}\n"
  },
  {
    "path": "fileserver/fsmgr/fsmgr_test.go",
    "content": "package fsmgr\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n)\n\nconst (\n\tseafileConfPath = \"/tmp/conf\"\n\tseafileDataDir  = \"/tmp/conf/seafile-data\"\n\trepoID          = \"b1f2ad61-9164-418a-a47f-ab805dbd5694\"\n\tblkID           = \"0401fc662e3bc87a41f299a907c056aaf8322a26\"\n\tsubDirID        = \"0401fc662e3bc87a41f299a907c056aaf8322a27\"\n)\n\nvar dirID string\nvar fileID string\n\nfunc createFile() error {\n\tvar blkIDs []string\n\tfor i := 0; i < 2; i++ {\n\t\tblkshal := blkID\n\t\tblkIDs = append(blkIDs, blkshal)\n\t}\n\n\tseafile, err := NewSeafile(1, 100, blkIDs)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = SaveSeafile(repoID, seafile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfileID = seafile.FileID\n\n\tvar entries []*SeafDirent\n\tfor i := 0; i < 2; i++ {\n\t\tdirent := SeafDirent{ID: subDirID, Name: \"/\", Mode: 0x4000}\n\t\tentries = append(entries, &dirent)\n\t}\n\tseafdir, err := NewSeafdir(1, entries)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to new seafdir: %v\", err)\n\t\treturn err\n\t}\n\terr = SaveSeafdir(repoID, seafdir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdirID = seafdir.DirID\n\n\treturn nil\n}\n\nfunc delFile() error {\n\terr := os.RemoveAll(seafileConfPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc TestMain(m *testing.M) {\n\tInit(seafileConfPath, seafileDataDir, 2<<30)\n\terr := createFile()\n\tif err != nil {\n\t\tfmt.Printf(\"Failed to create test file : %v.\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tcode := m.Run()\n\terr = delFile()\n\tif err != nil {\n\t\tfmt.Printf(\"Failed to remove test file : %v\\n\", err)\n\t}\n\tos.Exit(code)\n}\n\nfunc TestGetSeafile(t *testing.T) {\n\texists, err := Exists(repoID, fileID)\n\tif !exists {\n\t\tt.Errorf(\"seafile is not exists : %v.\\n\", err)\n\t}\n\tseafile, err := GetSeafile(repoID, fileID)\n\tif err != nil || seafile == nil {\n\t\tt.Errorf(\"Failed to get seafile : %v.\\n\", err)\n\t\tt.FailNow()\n\t}\n\n\tfor _, v := range 
seafile.BlkIDs {\n\t\tif v != blkID {\n\t\t\tt.Errorf(\"Wrong file content.\\n\")\n\t\t}\n\t}\n}\n\nfunc TestGetSeafdir(t *testing.T) {\n\texists, err := Exists(repoID, dirID)\n\tif !exists {\n\t\tt.Errorf(\"seafile is not exists : %v.\\n\", err)\n\t}\n\tseafdir, err := GetSeafdir(repoID, dirID)\n\tif err != nil || seafdir == nil {\n\t\tt.Errorf(\"Failed to get seafdir : %v.\\n\", err)\n\t\tt.FailNow()\n\t}\n\n\tfor _, v := range seafdir.Entries {\n\t\tif v.ID != subDirID {\n\t\t\tt.Errorf(\"Wrong file content.\\n\")\n\t\t}\n\t}\n\n}\n\nfunc TestGetSeafdirByPath(t *testing.T) {\n\tseafdir, err := GetSeafdirByPath(repoID, dirID, \"/\")\n\tif err != nil || seafdir == nil {\n\t\tt.Errorf(\"Failed to get seafdir : %v.\\n\", err)\n\t\tt.FailNow()\n\t}\n\n\tfor _, v := range seafdir.Entries {\n\t\tif v.ID != subDirID {\n\t\t\tt.Errorf(\"Wrong file content.\\n\")\n\t\t}\n\t}\n\n}\n"
  },
  {
    "path": "fileserver/go.mod",
    "content": "module github.com/haiwen/seafile-server/fileserver\n\ngo 1.22\n\nrequire (\n\tgithub.com/dgraph-io/ristretto v0.2.0\n\tgithub.com/go-redis/redis/v8 v8.11.5\n\tgithub.com/go-sql-driver/mysql v1.5.0\n\tgithub.com/golang-jwt/jwt/v5 v5.2.2\n\tgithub.com/google/uuid v1.1.1\n\tgithub.com/gorilla/mux v1.7.4\n\tgithub.com/json-iterator/go v1.1.12\n\tgithub.com/sirupsen/logrus v1.9.3\n\tgolang.org/x/text v0.3.8\n\tgopkg.in/ini.v1 v1.55.0\n)\n\nrequire (\n\tgithub.com/cespare/xxhash/v2 v2.1.2 // indirect\n\tgithub.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect\n\tgithub.com/dustin/go-humanize v1.0.1 // indirect\n\tgithub.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 // indirect\n\tgithub.com/modern-go/reflect2 v1.0.2 // indirect\n\tgithub.com/pkg/errors v0.9.1 // indirect\n\tgithub.com/smartystreets/goconvey v1.6.4 // indirect\n\tgolang.org/x/sys v0.11.0 // indirect\n)\n"
  },
  {
    "path": "fileserver/go.sum",
    "content": "github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=\ngithub.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=\ngithub.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=\ngithub.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/dgraph-io/ristretto v0.2.0 h1:XAfl+7cmoUDWW/2Lx8TGZQjjxIQ2Ley9DSf52dru4WE=\ngithub.com/dgraph-io/ristretto v0.2.0/go.mod h1:8uBHCU/PBV4Ag0CJrP47b9Ofby5dqWNh4FicAdoqFNU=\ngithub.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y=\ngithub.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=\ngithub.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=\ngithub.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=\ngithub.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=\ngithub.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=\ngithub.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=\ngithub.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=\ngithub.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=\ngithub.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=\ngithub.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=\ngithub.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=\ngithub.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=\ngithub.com/golang-jwt/jwt/v5 v5.2.2/go.mod 
h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=\ngithub.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=\ngithub.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=\ngithub.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=\ngithub.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=\ngithub.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=\ngithub.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc=\ngithub.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=\ngithub.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=\ngithub.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=\ngithub.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=\ngithub.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=\ngithub.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc=\ngithub.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=\ngithub.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=\ngithub.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=\ngithub.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=\ngithub.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=\ngithub.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=\ngithub.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=\ngithub.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE=\ngithub.com/onsi/gomega v1.18.1/go.mod 
h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=\ngithub.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=\ngithub.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=\ngithub.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=\ngithub.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=\ngithub.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=\ngithub.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=\ngithub.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=\ngithub.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=\ngithub.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=\ngithub.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=\ngithub.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=\ngithub.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=\ngithub.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=\ngithub.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=\ngithub.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=\ngolang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=\ngolang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0=\ngolang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=\ngolang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys 
v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=\ngolang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\ngolang.org/x/text v0.3.8 h1:nAL+RVCQ9uMn3vJZbV+MRnydTJFPf8qqY42YiA6MrqY=\ngolang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=\ngolang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=\ngopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/ini.v1 v1.55.0 h1:E8yzL5unfpW3M6fz/eB7Cb5MQAYSZ7GKo4Qth+N2sgQ=\ngopkg.in/ini.v1 v1.55.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=\ngopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=\ngopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=\ngopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=\ngopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=\ngopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\ngopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=\ngopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\n"
  },
  {
    "path": "fileserver/http_code.go",
    "content": "package main\n\nconst (\n\tseafHTTPResBadFileName   = 440\n\tseafHTTPResExists        = 441\n\tseafHTTPResNotExists     = 441\n\tseafHTTPResTooLarge      = 442\n\tseafHTTPResNoQuota       = 443\n\tseafHTTPResRepoDeleted   = 444\n\tseafHTTPResRepoCorrupted = 445\n\tseafHTTPResBlockMissing  = 446\n)\n"
  },
  {
    "path": "fileserver/merge.go",
    "content": "package main\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"path/filepath\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/haiwen/seafile-server/fileserver/commitmgr\"\n\t\"github.com/haiwen/seafile-server/fileserver/fsmgr\"\n\t\"github.com/haiwen/seafile-server/fileserver/option\"\n\t\"github.com/haiwen/seafile-server/fileserver/utils\"\n)\n\ntype mergeOptions struct {\n\tremoteRepoID    string\n\tremoteHead      string\n\tmergedRoot      string\n\tconflict        bool\n\temailToNickname map[string]string\n}\n\nfunc mergeTrees(storeID string, roots []string, opt *mergeOptions) error {\n\tif len(roots) != 3 {\n\t\terr := fmt.Errorf(\"invalid argument\")\n\t\treturn err\n\t}\n\n\topt.emailToNickname = make(map[string]string)\n\n\tvar trees []*fsmgr.SeafDir\n\tfor i := 0; i < 3; i++ {\n\t\tdir, err := fsmgr.GetSeafdir(storeID, roots[i])\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"failed to get dir: %v\", err)\n\t\t\treturn err\n\t\t}\n\t\ttrees = append(trees, dir)\n\t}\n\n\terr := mergeTreesRecursive(storeID, trees, \"\", opt)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to merge trees: %v\", err)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc mergeTreesRecursive(storeID string, trees []*fsmgr.SeafDir, baseDir string, opt *mergeOptions) error {\n\tvar ptrs [3][]*fsmgr.SeafDirent\n\tvar mergedDents []*fsmgr.SeafDirent\n\n\tn := 3\n\tfor i := 0; i < n; i++ {\n\t\tif trees[i] != nil {\n\t\t\tptrs[i] = trees[i].Entries\n\t\t}\n\t}\n\n\tvar done bool\n\tvar offset = make([]int, n)\n\tfor {\n\t\tdents := make([]*fsmgr.SeafDirent, n)\n\t\tvar firstName string\n\t\tdone = true\n\t\tfor i := 0; i < n; i++ {\n\t\t\tif len(ptrs[i]) > offset[i] {\n\t\t\t\tdone = false\n\t\t\t\tdent := ptrs[i][offset[i]]\n\t\t\t\tif firstName == \"\" {\n\t\t\t\t\tfirstName = dent.Name\n\t\t\t\t} else if dent.Name > firstName {\n\t\t\t\t\tfirstName = dent.Name\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif done 
{\n\t\t\tbreak\n\t\t}\n\n\t\tvar nFiles, nDirs int\n\t\tfor i := 0; i < n; i++ {\n\t\t\tif len(ptrs[i]) > offset[i] {\n\t\t\t\tdent := ptrs[i][offset[i]]\n\t\t\t\tif firstName == dent.Name {\n\t\t\t\t\tif fsmgr.IsDir(dent.Mode) {\n\t\t\t\t\t\tnDirs++\n\t\t\t\t\t} else {\n\t\t\t\t\t\tnFiles++\n\t\t\t\t\t}\n\t\t\t\t\tdents[i] = dent\n\t\t\t\t\toffset[i]++\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif nFiles > 0 {\n\t\t\tretDents, err := mergeEntries(storeID, dents, baseDir, opt)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tmergedDents = append(mergedDents, retDents...)\n\t\t}\n\n\t\tif nDirs > 0 {\n\t\t\tretDents, err := mergeDirectories(storeID, dents, baseDir, opt)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tmergedDents = append(mergedDents, retDents...)\n\t\t}\n\t}\n\n\tsort.Sort(Dirents(mergedDents))\n\tmergedTree, err := fsmgr.NewSeafdir(1, mergedDents)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to new seafdir: %v\", err)\n\t\treturn err\n\t}\n\n\topt.mergedRoot = mergedTree.DirID\n\n\tif trees[1] != nil && trees[1].DirID == mergedTree.DirID ||\n\t\ttrees[2] != nil && trees[2].DirID == mergedTree.DirID {\n\t\treturn nil\n\t}\n\n\terr = fsmgr.SaveSeafdir(storeID, mergedTree)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to save merged tree %s/%s\", storeID, baseDir)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc mergeEntries(storeID string, dents []*fsmgr.SeafDirent, baseDir string, opt *mergeOptions) ([]*fsmgr.SeafDirent, error) {\n\tvar mergedDents []*fsmgr.SeafDirent\n\tn := 3\n\tfiles := make([]*fsmgr.SeafDirent, n)\n\n\tfor i := 0; i < n; i++ {\n\t\tif dents[i] != nil && !fsmgr.IsDir(dents[i].Mode) {\n\t\t\tfiles[i] = dents[i]\n\t\t}\n\t}\n\n\tbase := files[0]\n\thead := files[1]\n\tremote := files[2]\n\n\tif head != nil && remote != nil {\n\t\tif head.ID == remote.ID {\n\t\t\tmergedDents = append(mergedDents, head)\n\t\t} else if base != nil && base.ID == head.ID {\n\t\t\tmergedDents = append(mergedDents, remote)\n\t\t} 
else if base != nil && base.ID == remote.ID {\n\t\t\tmergedDents = append(mergedDents, head)\n\t\t} else {\n\t\t\tconflictName, _ := mergeConflictFileName(storeID, opt, baseDir, head.Name)\n\t\t\tif conflictName == \"\" {\n\t\t\t\terr := fmt.Errorf(\"failed to generate conflict file name\")\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tdents[2].Name = conflictName\n\t\t\tmergedDents = append(mergedDents, head)\n\t\t\tmergedDents = append(mergedDents, remote)\n\t\t\topt.conflict = true\n\t\t}\n\t} else if base != nil && head == nil && remote != nil {\n\t\tif base.ID != remote.ID {\n\t\t\tif dents[1] != nil {\n\t\t\t\tconflictName, _ := mergeConflictFileName(storeID, opt, baseDir, remote.Name)\n\t\t\t\tif conflictName == \"\" {\n\t\t\t\t\terr := fmt.Errorf(\"failed to generate conflict file name\")\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tdents[2].Name = conflictName\n\t\t\t\tmergedDents = append(mergedDents, remote)\n\t\t\t\topt.conflict = true\n\t\t\t} else {\n\t\t\t\tmergedDents = append(mergedDents, remote)\n\t\t\t}\n\t\t}\n\t} else if base != nil && head != nil && remote == nil {\n\t\tif base.ID != head.ID {\n\t\t\tif dents[2] != nil {\n\t\t\t\tconflictName, _ := mergeConflictFileName(storeID, opt, baseDir, dents[2].Name)\n\t\t\t\tif conflictName == \"\" {\n\t\t\t\t\terr := fmt.Errorf(\"failed to generate conflict file name\")\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tdents[2].Name = conflictName\n\t\t\t\tmergedDents = append(mergedDents, head)\n\t\t\t\topt.conflict = true\n\t\t\t} else {\n\t\t\t\tmergedDents = append(mergedDents, head)\n\t\t\t}\n\t\t}\n\t} else if base == nil && head == nil && remote != nil {\n\t\tif dents[1] == nil {\n\t\t\tmergedDents = append(mergedDents, remote)\n\t\t} else if dents[0] != nil && dents[0].ID == dents[1].ID {\n\t\t\tmergedDents = append(mergedDents, remote)\n\t\t} else {\n\t\t\tconflictName, _ := mergeConflictFileName(storeID, opt, baseDir, remote.Name)\n\t\t\tif conflictName == \"\" {\n\t\t\t\terr := 
fmt.Errorf(\"failed to generate conflict file name\")\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tdents[2].Name = conflictName\n\t\t\tmergedDents = append(mergedDents, remote)\n\t\t\topt.conflict = true\n\t\t}\n\t} else if base == nil && head != nil && remote == nil {\n\t\tif dents[2] == nil {\n\t\t\tmergedDents = append(mergedDents, head)\n\t\t} else if dents[0] != nil && dents[0].ID == dents[2].ID {\n\t\t\tmergedDents = append(mergedDents, head)\n\t\t} else {\n\t\t\tconflictName, _ := mergeConflictFileName(storeID, opt, baseDir, dents[2].Name)\n\t\t\tif conflictName == \"\" {\n\t\t\t\terr := fmt.Errorf(\"failed to generate conflict file name\")\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tdents[2].Name = conflictName\n\t\t\tmergedDents = append(mergedDents, head)\n\t\t\topt.conflict = true\n\t\t}\n\t} /* else if base != nil && head == nil && remote == nil {\n\t    Don't need to add anything to mergeDents.\n\t}*/\n\n\treturn mergedDents, nil\n}\n\nfunc mergeDirectories(storeID string, dents []*fsmgr.SeafDirent, baseDir string, opt *mergeOptions) ([]*fsmgr.SeafDirent, error) {\n\tvar dirMask int\n\tvar mergedDents []*fsmgr.SeafDirent\n\tvar dirName string\n\tn := 3\n\tsubDirs := make([]*fsmgr.SeafDir, n)\n\tfor i := 0; i < n; i++ {\n\t\tif dents[i] != nil && fsmgr.IsDir(dents[i].Mode) {\n\t\t\tdirMask |= 1 << i\n\t\t}\n\t}\n\n\tswitch dirMask {\n\tcase 0:\n\t\terr := fmt.Errorf(\"no dirent for merge\")\n\t\treturn nil, err\n\tcase 1:\n\t\treturn mergedDents, nil\n\tcase 2:\n\t\tmergedDents = append(mergedDents, dents[1])\n\t\treturn mergedDents, nil\n\tcase 3:\n\t\tif dents[0].ID == dents[1].ID {\n\t\t\treturn mergedDents, nil\n\t\t}\n\tcase 4:\n\t\tmergedDents = append(mergedDents, dents[2])\n\t\treturn mergedDents, nil\n\tcase 5:\n\t\tif dents[0].ID == dents[2].ID {\n\t\t\treturn mergedDents, nil\n\t\t}\n\tcase 6, 7:\n\t\tif dents[1].ID == dents[2].ID {\n\t\t\tmergedDents = append(mergedDents, dents[1])\n\t\t\treturn mergedDents, nil\n\t\t} else if dents[0] != nil && 
dents[0].ID == dents[1].ID {\n\t\t\tmergedDents = append(mergedDents, dents[2])\n\t\t\treturn mergedDents, nil\n\t\t} else if dents[0] != nil && dents[0].ID == dents[2].ID {\n\t\t\tmergedDents = append(mergedDents, dents[1])\n\t\t\treturn mergedDents, nil\n\t\t}\n\tdefault:\n\t\terr := fmt.Errorf(\"wrong dir mask for merge\")\n\t\treturn nil, err\n\t}\n\n\tfor i := 0; i < n; i++ {\n\t\tsubDirs[i] = nil\n\t}\n\n\tfor i := 0; i < n; i++ {\n\t\tif dents[i] != nil && fsmgr.IsDir(dents[i].Mode) {\n\t\t\tdir, err := fsmgr.GetSeafdir(storeID, dents[i].ID)\n\t\t\tif err != nil {\n\t\t\t\terr := fmt.Errorf(\"failed to get seafdir %s/%s\", storeID, dents[i].ID)\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tsubDirs[i] = dir\n\t\t\tdirName = dents[i].Name\n\t\t}\n\t}\n\n\tnewBaseDir := filepath.Join(baseDir, dirName)\n\tnewBaseDir = newBaseDir + \"/\"\n\terr := mergeTreesRecursive(storeID, subDirs, newBaseDir, opt)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to merge trees: %v\", err)\n\t\treturn nil, err\n\t}\n\n\tif dirMask == 3 || dirMask == 6 || dirMask == 7 {\n\t\tdent := dents[1]\n\t\tdent.ID = opt.mergedRoot\n\t\tmergedDents = append(mergedDents, dent)\n\t} else if dirMask == 5 {\n\t\tdent := dents[2]\n\t\tdent.ID = opt.mergedRoot\n\t\tmergedDents = append(mergedDents, dent)\n\t}\n\n\treturn mergedDents, nil\n}\n\nfunc mergeConflictFileName(storeID string, opt *mergeOptions, baseDir, fileName string) (string, error) {\n\tvar modifier string\n\tvar mtime int64\n\tfilePath := filepath.Join(baseDir, fileName)\n\tmodifier, mtime, err := getFileModifierMtime(opt.remoteRepoID, storeID, opt.remoteHead, filePath)\n\tif err != nil {\n\t\tcommit, err := commitmgr.Load(opt.remoteRepoID, opt.remoteHead)\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"failed to get head commit\")\n\t\t\treturn \"\", err\n\t\t}\n\t\tmodifier = commit.CreatorName\n\t\tmtime = time.Now().Unix()\n\t}\n\n\tnickname := getNickNameByModifier(opt.emailToNickname, modifier)\n\n\tconflictName := 
genConflictPath(fileName, nickname, mtime)\n\n\treturn conflictName, nil\n}\n\nfunc genConflictPath(originPath, modifier string, mtime int64) string {\n\tvar conflictPath string\n\tnow := time.Now()\n\ttimeBuf := now.Format(\"2006-Jan-2-15-04-05\")\n\tdot := strings.Index(originPath, \".\")\n\tif dot < 0 {\n\t\tif modifier != \"\" {\n\t\t\tconflictPath = fmt.Sprintf(\"%s (SFConflict %s %s)\",\n\t\t\t\toriginPath, modifier, timeBuf)\n\t\t} else {\n\t\t\tconflictPath = fmt.Sprintf(\"%s (SFConflict %s)\",\n\t\t\t\toriginPath, timeBuf)\n\t\t}\n\t} else {\n\t\tif modifier != \"\" {\n\t\t\tconflictPath = fmt.Sprintf(\"%s (SFConflict %s %s).%s\",\n\t\t\t\toriginPath, modifier, timeBuf, originPath[dot+1:])\n\t\t} else {\n\t\t\tconflictPath = fmt.Sprintf(\"%s (SFConflict %s).%s\",\n\t\t\t\toriginPath, timeBuf, originPath[dot+1:])\n\t\t}\n\t}\n\n\treturn conflictPath\n}\n\nfunc getNickNameByModifier(emailToNickname map[string]string, modifier string) string {\n\tif modifier == \"\" {\n\t\treturn \"\"\n\t}\n\tnickname, ok := emailToNickname[modifier]\n\tif ok {\n\t\treturn nickname\n\t}\n\tif option.JWTPrivateKey != \"\" {\n\t\tnickname = postGetNickName(modifier)\n\t}\n\n\tif nickname == \"\" {\n\t\tnickname = modifier\n\t}\n\n\temailToNickname[modifier] = nickname\n\n\treturn nickname\n}\n\nfunc postGetNickName(modifier string) string {\n\ttokenString, err := utils.GenSeahubJWTToken()\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\n\theader := map[string][]string{\n\t\t\"Authorization\": {\"Token \" + tokenString},\n\t}\n\n\tdata, err := json.Marshal(map[string]interface{}{\n\t\t\"user_id_list\": []string{modifier},\n\t})\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\n\turl := option.SeahubURL + \"/user-list/\"\n\tstatus, body, err := utils.HttpCommon(\"POST\", url, header, bytes.NewReader(data))\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\tif status != http.StatusOK {\n\t\treturn \"\"\n\t}\n\n\tresults := make(map[string]interface{})\n\terr = json.Unmarshal(body, &results)\n\tif 
err != nil {\n\t\treturn \"\"\n\t}\n\n\tuserList, ok := results[\"user_list\"].([]interface{})\n\tif !ok {\n\t\treturn \"\"\n\t}\n\tnickname := \"\"\n\tfor _, element := range userList {\n\t\tlist, ok := element.(map[string]interface{})\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tnickname, _ = list[\"name\"].(string)\n\t\tif nickname != \"\" {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn nickname\n}\n\nfunc getFileModifierMtime(repoID, storeID, head, filePath string) (string, int64, error) {\n\tcommit, err := commitmgr.Load(repoID, head)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to get head commit\")\n\t\treturn \"\", -1, err\n\t}\n\n\tparent := filepath.Dir(filePath)\n\tif parent == \".\" {\n\t\tparent = \"\"\n\t}\n\n\tfileName := filepath.Base(filePath)\n\tdir, err := fsmgr.GetSeafdirByPath(storeID, commit.RootID, parent)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"dir %s doesn't exist in repo %s\", parent, repoID)\n\t\treturn \"\", -1, err\n\t}\n\n\tvar dent *fsmgr.SeafDirent\n\tentries := dir.Entries\n\tfor _, d := range entries {\n\t\tif d.Name == fileName {\n\t\t\tdent = d\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif dent == nil {\n\t\terr := fmt.Errorf(\"file %s doesn't exist in repo %s\", fileName, repoID)\n\t\treturn \"\", -1, err\n\t}\n\n\treturn dent.Modifier, dent.Mtime, nil\n}\n"
  },
  {
    "path": "fileserver/merge_test.go",
    "content": "package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"syscall\"\n\t\"testing\"\n\n\t\"github.com/haiwen/seafile-server/fileserver/commitmgr\"\n\t\"github.com/haiwen/seafile-server/fileserver/fsmgr\"\n\t\"github.com/haiwen/seafile-server/fileserver/option\"\n)\n\nconst (\n\tmergeTestCommitID        = \"0401fc662e3bc87a41f299a907c056aaf8322a27\"\n\tmergeTestRepoID          = \"b1f2ad61-9164-418a-a47f-ab805dbd5694\"\n\tmergeTestSeafileConfPath = \"/tmp/conf\"\n\tmergeTestSeafileDataDir  = \"/tmp/conf/seafile-data\"\n)\n\nvar mergeTestTree1 string\nvar mergeTestTree2 string\nvar mergeTestTree3 string\nvar mergeTestTree4 string\nvar mergeTestTree5 string\nvar mergeTestTree1CommitID string\nvar mergeTestTree2CommitID string\nvar mergeTestTree3CommitID string\nvar mergeTestTree4CommitID string\n\n/*\ntest directory structure:\ntree1\n|--bbb\n\n\t|-- testfile(size:1)\n\ntree2\n|--bbb\n\n\t|-- testfile(size:10)\n\ntree3\n|--bbb\n\ntree4\n|--bbb\n\n\t|-- testfile(size:100)\n\ntree5\n|--\n*/\nfunc mergeTestCreateTestDir() error {\n\tmodeDir := uint32(syscall.S_IFDIR | 0644)\n\tmodeFile := uint32(syscall.S_IFREG | 0644)\n\n\temptyDir, err := mergeTestCreateSeafdir(nil)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to get seafdir: %v\", err)\n\t\treturn err\n\t}\n\tmergeTestTree5 = emptyDir\n\n\tfile1, err := fsmgr.NewSeafile(1, 1, []string{\"4f616f98d6a264f75abffe1bc150019c880be239\"})\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to new seafile: %v\", err)\n\t\treturn err\n\t}\n\terr = fsmgr.SaveSeafile(mergeTestRepoID, file1)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to save seafile: %v\", err)\n\t\treturn err\n\t}\n\n\tdent1 := fsmgr.SeafDirent{ID: file1.FileID, Name: \"testfile\", Mode: modeFile, Size: 1}\n\tdir1, err := mergeTestCreateSeafdir([]*fsmgr.SeafDirent{&dent1})\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to get seafdir: %v\", err)\n\t\treturn err\n\t}\n\tdent2 := fsmgr.SeafDirent{ID: dir1, Name: \"bbb\", Mode: 
modeDir}\n\tdir2, err := mergeTestCreateSeafdir([]*fsmgr.SeafDirent{&dent2})\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to get seafdir: %v\", err)\n\t\treturn err\n\t}\n\n\tmergeTestTree1 = dir2\n\n\tcommit1 := commitmgr.NewCommit(mergeTestRepoID, \"\", mergeTestTree1, \"seafile\", \"this is the first commit.\\n\")\n\terr = commitmgr.Save(commit1)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to save commit: %v\", err)\n\t\treturn err\n\t}\n\tmergeTestTree1CommitID = commit1.CommitID\n\n\tfile2, err := fsmgr.NewSeafile(1, 10, []string{\"4f616f98d6a264f75abffe1bc150019c880be239\"})\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to new seafile: %v\", err)\n\t\treturn err\n\t}\n\terr = fsmgr.SaveSeafile(mergeTestRepoID, file2)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to save seafile: %v\", err)\n\t\treturn err\n\t}\n\n\tdent3 := fsmgr.SeafDirent{ID: file2.FileID, Name: \"testfile\", Mode: modeFile, Size: 10}\n\tdir3, err := mergeTestCreateSeafdir([]*fsmgr.SeafDirent{&dent3})\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to get seafdir: %v\", err)\n\t\treturn err\n\t}\n\n\tdent4 := fsmgr.SeafDirent{ID: dir3, Name: \"bbb\", Mode: modeDir}\n\tdir4, err := mergeTestCreateSeafdir([]*fsmgr.SeafDirent{&dent4})\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to get seafdir: %v\", err)\n\t\treturn err\n\t}\n\n\tmergeTestTree2 = dir4\n\n\tcommit2 := commitmgr.NewCommit(mergeTestRepoID, \"\", mergeTestTree2, \"seafile\", \"this is the second commit.\\n\")\n\terr = commitmgr.Save(commit2)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to save commit: %v\", err)\n\t\treturn err\n\t}\n\tmergeTestTree2CommitID = commit2.CommitID\n\n\tdir5, err := mergeTestCreateSeafdir(nil)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to get seafdir: %v\", err)\n\t\treturn err\n\t}\n\n\tdent6 := fsmgr.SeafDirent{ID: dir5, Name: \"bbb\", Mode: modeDir}\n\tdir6, err := mergeTestCreateSeafdir([]*fsmgr.SeafDirent{&dent6})\n\tif err != nil {\n\t\terr := 
fmt.Errorf(\"failed to get seafdir: %v\", err)\n\t\treturn err\n\t}\n\n\tmergeTestTree3 = dir6\n\n\tcommit3 := commitmgr.NewCommit(mergeTestRepoID, \"\", mergeTestTree3, \"seafile\", \"this is the third commit.\\n\")\n\terr = commitmgr.Save(commit3)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to save commit: %v\", err)\n\t\treturn err\n\t}\n\tmergeTestTree3CommitID = commit3.CommitID\n\n\tfile3, err := fsmgr.NewSeafile(1, 100, []string{\"4f616f98d6a264f75abffe1bc150019c880be240\"})\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to new seafile: %v\", err)\n\t\treturn err\n\t}\n\terr = fsmgr.SaveSeafile(mergeTestRepoID, file3)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to save seafile: %v\", err)\n\t\treturn err\n\t}\n\tdent7 := fsmgr.SeafDirent{ID: file3.FileID, Name: \"testfile\", Mode: modeFile, Size: 100}\n\tdir7, err := mergeTestCreateSeafdir([]*fsmgr.SeafDirent{&dent7})\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to get seafdir: %v\", err)\n\t\treturn err\n\t}\n\n\tdent8 := fsmgr.SeafDirent{ID: dir7, Name: \"bbb\", Mode: modeDir}\n\tdir8, err := mergeTestCreateSeafdir([]*fsmgr.SeafDirent{&dent8})\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to get seafdir: %v\", err)\n\t\treturn err\n\t}\n\n\tmergeTestTree4 = dir8\n\n\tcommit4 := commitmgr.NewCommit(mergeTestRepoID, \"\", mergeTestTree3, \"seafile\", \"this is the fourth commit.\\n\")\n\terr = commitmgr.Save(commit4)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to save commit: %v\", err)\n\t\treturn err\n\t}\n\tmergeTestTree4CommitID = commit4.CommitID\n\n\treturn nil\n}\n\nfunc mergeTestCreateSeafdir(dents []*fsmgr.SeafDirent) (string, error) {\n\tseafdir, err := fsmgr.NewSeafdir(1, dents)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to new seafdir: %v\", err)\n\t\treturn \"\", err\n\t}\n\terr = fsmgr.SaveSeafdir(mergeTestRepoID, seafdir)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn seafdir.DirID, nil\n}\n\nfunc mergeTestDelFile() error {\n\terr := 
os.RemoveAll(mergeTestSeafileConfPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc TestMergeTrees(t *testing.T) {\n\tcommitmgr.Init(mergeTestSeafileConfPath, mergeTestSeafileDataDir)\n\tfsmgr.Init(mergeTestSeafileConfPath, mergeTestSeafileDataDir, option.FsCacheLimit)\n\terr := mergeTestCreateTestDir()\n\tif err != nil {\n\t\tfmt.Printf(\"failed to create test dir: %v\", err)\n\t\tos.Exit(1)\n\t}\n\n\tt.Run(\"test1\", testMergeTrees1)\n\tt.Run(\"test2\", testMergeTrees2)\n\tt.Run(\"test3\", testMergeTrees3)\n\tt.Run(\"test4\", testMergeTrees4)\n\tt.Run(\"test5\", testMergeTrees5)\n\tt.Run(\"test6\", testMergeTrees6)\n\tt.Run(\"test7\", testMergeTrees7)\n\tt.Run(\"test8\", testMergeTrees8)\n\tt.Run(\"test9\", testMergeTrees9)\n\tt.Run(\"test10\", testMergeTrees10)\n\tt.Run(\"test11\", testMergeTrees11)\n\tt.Run(\"test12\", testMergeTrees12)\n\n\terr = mergeTestDelFile()\n\tif err != nil {\n\t\tfmt.Printf(\"failed to remove test file : %v\", err)\n\t\tos.Exit(1)\n\t}\n}\n\n// head add file\nfunc testMergeTrees1(t *testing.T) {\n\tcommit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree3CommitID)\n\tif err != nil {\n\t\tt.Errorf(\"failed to load commit.\\n\")\n\t}\n\troots := []string{mergeTestTree3, mergeTestTree2, mergeTestTree3}\n\topt := new(mergeOptions)\n\topt.remoteRepoID = mergeTestRepoID\n\topt.remoteHead = commit.CommitID\n\n\terr = mergeTrees(mergeTestRepoID, roots, opt)\n\tif err != nil {\n\t\tt.Errorf(\"failed to merge.\\n\")\n\t}\n\n\tif opt.mergedRoot != mergeTestTree2 {\n\t\tt.Errorf(\"merge error %s/%s.\\n\", opt.mergedRoot, mergeTestTree2)\n\t}\n}\n\n// remote add file\nfunc testMergeTrees2(t *testing.T) {\n\tcommit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree3CommitID)\n\tif err != nil {\n\t\tt.Errorf(\"failed to load commit.\\n\")\n\t}\n\troots := []string{mergeTestTree3, mergeTestTree3, mergeTestTree2}\n\topt := new(mergeOptions)\n\topt.remoteRepoID = mergeTestRepoID\n\topt.remoteHead = commit.CommitID\n\n\terr 
= mergeTrees(mergeTestRepoID, roots, opt)\n\tif err != nil {\n\t\tt.Errorf(\"failed to merge.\\n\")\n\t}\n\n\tif opt.mergedRoot != mergeTestTree2 {\n\t\tt.Errorf(\"merge error %s/%s.\\n\", opt.mergedRoot, mergeTestTree2)\n\t}\n}\n\n// head modify file\nfunc testMergeTrees3(t *testing.T) {\n\tcommit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree1CommitID)\n\tif err != nil {\n\t\tt.Errorf(\"failed to load commit.\\n\")\n\t}\n\troots := []string{mergeTestTree1, mergeTestTree2, mergeTestTree1}\n\topt := new(mergeOptions)\n\topt.remoteRepoID = mergeTestRepoID\n\topt.remoteHead = commit.CommitID\n\n\terr = mergeTrees(mergeTestRepoID, roots, opt)\n\tif err != nil {\n\t\tt.Errorf(\"failed to merge.\\n\")\n\t}\n\n\tif opt.mergedRoot != mergeTestTree2 {\n\t\tt.Errorf(\"merge error %s/%s.\\n\", opt.mergedRoot, mergeTestTree2)\n\t}\n}\n\n// remote modify file\nfunc testMergeTrees4(t *testing.T) {\n\tcommit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree1CommitID)\n\tif err != nil {\n\t\tt.Errorf(\"failed to load commit.\\n\")\n\t}\n\troots := []string{mergeTestTree1, mergeTestTree1, mergeTestTree2}\n\topt := new(mergeOptions)\n\topt.remoteRepoID = mergeTestRepoID\n\topt.remoteHead = commit.CommitID\n\n\terr = mergeTrees(mergeTestRepoID, roots, opt)\n\tif err != nil {\n\t\tt.Errorf(\"failed to merge.\\n\")\n\t}\n\n\tif opt.mergedRoot != mergeTestTree2 {\n\t\tt.Errorf(\"merge error %s/%s.\\n\", opt.mergedRoot, mergeTestTree2)\n\t}\n}\n\n// head and remote add file\nfunc testMergeTrees5(t *testing.T) {\n\tcommit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree3CommitID)\n\tif err != nil {\n\t\tt.Errorf(\"failed to load commit.\\n\")\n\t}\n\troots := []string{mergeTestTree3, mergeTestTree1, mergeTestTree2}\n\topt := new(mergeOptions)\n\topt.remoteRepoID = mergeTestRepoID\n\topt.remoteHead = commit.CommitID\n\n\terr = mergeTrees(mergeTestRepoID, roots, opt)\n\tif err != nil {\n\t\tt.Errorf(\"failed to merge.\\n\")\n\t}\n\tif !opt.conflict {\n\t\tt.Errorf(\"merge 
error %s.\\n\", opt.mergedRoot)\n\t}\n}\n\n// head and remote modify file\nfunc testMergeTrees6(t *testing.T) {\n\tcommit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree1CommitID)\n\tif err != nil {\n\t\tt.Errorf(\"failed to load commit.\\n\")\n\t}\n\troots := []string{mergeTestTree1, mergeTestTree2, mergeTestTree4}\n\topt := new(mergeOptions)\n\topt.remoteRepoID = mergeTestRepoID\n\topt.remoteHead = commit.CommitID\n\n\terr = mergeTrees(mergeTestRepoID, roots, opt)\n\tif err != nil {\n\t\tt.Errorf(\"failed to merge.\\n\")\n\t}\n\tif !opt.conflict {\n\t\tt.Errorf(\"merge error %s.\\n\", opt.mergedRoot)\n\t}\n}\n\n// head modify file and remote delete file\nfunc testMergeTrees7(t *testing.T) {\n\tcommit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree1CommitID)\n\tif err != nil {\n\t\tt.Errorf(\"failed to load commit.\\n\")\n\t}\n\troots := []string{mergeTestTree1, mergeTestTree2, mergeTestTree3}\n\topt := new(mergeOptions)\n\topt.remoteRepoID = mergeTestRepoID\n\topt.remoteHead = commit.CommitID\n\n\terr = mergeTrees(mergeTestRepoID, roots, opt)\n\tif err != nil {\n\t\tt.Errorf(\"failed to merge.\\n\")\n\t}\n\tif opt.mergedRoot != mergeTestTree2 {\n\t\tt.Errorf(\"merge error %s/%s.\\n\", opt.mergedRoot, mergeTestTree2)\n\t}\n}\n\n// head delete file and remote modify file\nfunc testMergeTrees8(t *testing.T) {\n\tcommit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree1CommitID)\n\tif err != nil {\n\t\tt.Errorf(\"failed to load commit.\\n\")\n\t}\n\troots := []string{mergeTestTree1, mergeTestTree3, mergeTestTree2}\n\topt := new(mergeOptions)\n\topt.remoteRepoID = mergeTestRepoID\n\topt.remoteHead = commit.CommitID\n\n\terr = mergeTrees(mergeTestRepoID, roots, opt)\n\tif err != nil {\n\t\tt.Errorf(\"failed to merge.\\n\")\n\t}\n\tif opt.mergedRoot != mergeTestTree2 {\n\t\tt.Errorf(\"merge error %s/%s.\\n\", opt.mergedRoot, mergeTestTree2)\n\t}\n}\n\n// head modify file and remote delete dir of this file\nfunc testMergeTrees9(t *testing.T) {\n\tcommit, 
err := commitmgr.Load(mergeTestRepoID, mergeTestTree1CommitID)\n\tif err != nil {\n\t\tt.Errorf(\"failed to load commit.\\n\")\n\t}\n\troots := []string{mergeTestTree1, mergeTestTree2, mergeTestTree5}\n\topt := new(mergeOptions)\n\topt.remoteRepoID = mergeTestRepoID\n\topt.remoteHead = commit.CommitID\n\n\terr = mergeTrees(mergeTestRepoID, roots, opt)\n\tif err != nil {\n\t\tt.Errorf(\"failed to merge.\\n\")\n\t}\n\tif opt.mergedRoot != mergeTestTree2 {\n\t\tt.Errorf(\"merge error %s/%s.\\n\", opt.mergedRoot, mergeTestTree2)\n\t}\n}\n\n// remote modify file and head delete dir of this file\nfunc testMergeTrees10(t *testing.T) {\n\tcommit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree1CommitID)\n\tif err != nil {\n\t\tt.Errorf(\"failed to load commit.\\n\")\n\t}\n\troots := []string{mergeTestTree1, mergeTestTree5, mergeTestTree2}\n\topt := new(mergeOptions)\n\topt.remoteRepoID = mergeTestRepoID\n\topt.remoteHead = commit.CommitID\n\n\terr = mergeTrees(mergeTestRepoID, roots, opt)\n\tif err != nil {\n\t\tt.Errorf(\"failed to merge.\\n\")\n\t}\n\tif opt.mergedRoot != mergeTestTree2 {\n\t\tt.Errorf(\"merge error %s/%s.\\n\", opt.mergedRoot, mergeTestTree2)\n\t}\n}\n\n// head add file and remote delete dir of this file\nfunc testMergeTrees11(t *testing.T) {\n\tcommit, err := commitmgr.Load(mergeTestRepoID, mergeTestTree3CommitID)\n\tif err != nil {\n\t\tt.Errorf(\"failed to load commit.\\n\")\n\t}\n\troots := []string{mergeTestTree3, mergeTestTree1, mergeTestTree5}\n\topt := new(mergeOptions)\n\topt.remoteRepoID = mergeTestRepoID\n\topt.remoteHead = commit.CommitID\n\n\terr = mergeTrees(mergeTestRepoID, roots, opt)\n\tif err != nil {\n\t\tt.Errorf(\"failed to merge.\\n\")\n\t}\n\tif opt.mergedRoot != mergeTestTree1 {\n\t\tt.Errorf(\"merge error %s/%s.\\n\", opt.mergedRoot, mergeTestTree1)\n\t}\n}\n\n// remote add file and head delete dir of this file\nfunc testMergeTrees12(t *testing.T) {\n\tcommit, err := commitmgr.Load(mergeTestRepoID, 
mergeTestTree3CommitID)\n\tif err != nil {\n\t\tt.Errorf(\"failed to load commit.\\n\")\n\t}\n\troots := []string{mergeTestTree3, mergeTestTree5, mergeTestTree1}\n\topt := new(mergeOptions)\n\topt.remoteRepoID = mergeTestRepoID\n\topt.remoteHead = commit.CommitID\n\n\terr = mergeTrees(mergeTestRepoID, roots, opt)\n\tif err != nil {\n\t\tt.Errorf(\"failed to merge.\\n\")\n\t}\n\tif opt.mergedRoot != mergeTestTree1 {\n\t\tt.Errorf(\"merge error %s/%s.\\n\", opt.mergedRoot, mergeTestTree1)\n\t}\n}\n"
  },
  {
    "path": "fileserver/metrics/metrics.go",
    "content": "package metrics\n\nimport (\n\t\"container/list\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"runtime/debug\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/dgraph-io/ristretto/z\"\n\t\"github.com/go-redis/redis/v8\"\n\t\"github.com/haiwen/seafile-server/fileserver/option\"\n\n\tlog \"github.com/sirupsen/logrus\"\n)\n\nconst (\n\tRedisChannel   = \"metric_channel\"\n\tComponentName  = \"go_fileserver\"\n\tMetricInterval = 30 * time.Second\n)\n\ntype MetricMgr struct {\n\tsync.Mutex\n\tinFlightRequestList *list.List\n}\n\ntype RequestInfo struct {\n\turlPath string\n\tmethod  string\n\tstart   time.Time\n}\n\nfunc (m *MetricMgr) AddReq(urlPath, method string) *list.Element {\n\treq := new(RequestInfo)\n\treq.urlPath = urlPath\n\treq.method = method\n\treq.start = time.Now()\n\n\tm.Lock()\n\tdefer m.Unlock()\n\te := m.inFlightRequestList.PushBack(req)\n\n\treturn e\n}\n\nfunc (m *MetricMgr) DecReq(e *list.Element) {\n\tm.Lock()\n\tdefer m.Unlock()\n\n\tm.inFlightRequestList.Remove(e)\n}\n\nvar (\n\tclient *redis.Client\n\tcloser *z.Closer\n\n\tmetricMgr *MetricMgr\n)\n\nfunc Init() {\n\tif !option.HasRedisOptions {\n\t\treturn\n\t}\n\tmetricMgr = new(MetricMgr)\n\tmetricMgr.inFlightRequestList = list.New()\n\n\tcloser = z.NewCloser(1)\n\tgo metricsHandler()\n}\n\nfunc Stop() {\n\tif !option.HasRedisOptions {\n\t\treturn\n\t}\n\tcloser.SignalAndWait()\n}\n\nfunc metricsHandler() {\n\tdefer closer.Done()\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\tlog.Errorf(\"panic: %v\\n%s\", err, debug.Stack())\n\t\t}\n\t}()\n\n\tserver := fmt.Sprintf(\"%s:%d\", option.RedisHost, option.RedisPort)\n\topt := &redis.Options{\n\t\tAddr:     server,\n\t\tPassword: option.RedisPasswd,\n\t}\n\topt.PoolSize = 1\n\n\tclient = redis.NewClient(opt)\n\n\tticker := time.NewTicker(MetricInterval)\n\tdefer ticker.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase <-closer.HasBeenClosed():\n\t\t\treturn\n\t\tcase <-ticker.C:\n\t\t\terr := 
publishMetrics()\n\t\t\tif err != nil {\n\t\t\t\tlog.Warnf(\"Failed to publish metrics to redis channel: %v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc MetricMiddleware(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\treq := metricMgr.AddReq(r.URL.Path, r.Method)\n\t\tnext.ServeHTTP(w, r)\n\t\tmetricMgr.DecReq(req)\n\t})\n}\n\ntype MetricMessage struct {\n\tMetricName    string `json:\"metric_name\"`\n\tMetricValue   any    `json:\"metric_value\"`\n\tMetricType    string `json:\"metric_type\"`\n\tComponentName string `json:\"component_name\"`\n\tMetricHelp    string `json:\"metric_help\"`\n\tNodeName      string `json:\"node_name\"`\n}\n\nfunc publishMetrics() error {\n\tmetricMgr.Lock()\n\tinFlightRequestCount := metricMgr.inFlightRequestList.Len()\n\tmetricMgr.Unlock()\n\n\tmsg := &MetricMessage{MetricName: \"in_flight_request_total\",\n\t\tMetricValue:   inFlightRequestCount,\n\t\tMetricType:    \"gauge\",\n\t\tComponentName: ComponentName,\n\t\tMetricHelp:    \"The number of currently running http requests.\",\n\t\tNodeName:      option.NodeName,\n\t}\n\n\tdata, err := json.Marshal(msg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = publishRedisMsg(RedisChannel, data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc publishRedisMsg(channel string, msg []byte) error {\n\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\tdefer cancel()\n\n\terr := client.Publish(ctx, channel, msg).Err()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to publish redis message: %w\", err)\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "fileserver/objstore/backend_fs.go",
    "content": "// Implementation of file system storage backend.\npackage objstore\n\nimport (\n\t\"io\"\n\t\"os\"\n\t\"path\"\n)\n\ntype fsBackend struct {\n\t// Path of the object directory\n\tobjDir  string\n\tobjType string\n\ttmpDir  string\n}\n\nfunc newFSBackend(seafileDataDir string, objType string) (*fsBackend, error) {\n\tobjDir := path.Join(seafileDataDir, \"storage\", objType)\n\terr := os.MkdirAll(objDir, os.ModePerm)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttmpDir := path.Join(seafileDataDir, \"tmpfiles\")\n\terr = os.MkdirAll(tmpDir, os.ModePerm)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbackend := new(fsBackend)\n\tbackend.objDir = objDir\n\tbackend.objType = objType\n\tbackend.tmpDir = tmpDir\n\treturn backend, nil\n}\n\nfunc (b *fsBackend) read(repoID string, objID string, w io.Writer) error {\n\tp := path.Join(b.objDir, repoID, objID[:2], objID[2:])\n\tfd, err := os.Open(p)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer fd.Close()\n\n\t_, err = io.Copy(w, fd)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (b *fsBackend) write(repoID string, objID string, r io.Reader, sync bool) error {\n\tparentDir := path.Join(b.objDir, repoID, objID[:2])\n\tp := path.Join(parentDir, objID[2:])\n\terr := os.MkdirAll(parentDir, os.ModePerm)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttmpDir := b.tmpDir\n\tif b.objType != \"blocks\" {\n\t\ttmpDir = parentDir\n\t}\n\ttFile, err := os.CreateTemp(tmpDir, objID+\".*\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tsuccess := false\n\tdefer func() {\n\t\tif !success {\n\t\t\tos.Remove(tFile.Name())\n\t\t}\n\t}()\n\n\t_, err = io.Copy(tFile, r)\n\tif err != nil {\n\t\ttFile.Close()\n\t\treturn err\n\t}\n\n\terr = tFile.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = os.Rename(tFile.Name(), p)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsuccess = true\n\treturn nil\n}\n\nfunc (b *fsBackend) exists(repoID string, objID string) (bool, error) {\n\tpath := path.Join(b.objDir, 
repoID, objID[:2], objID[2:])\n\t_, err := os.Stat(path)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn false, err\n\t\t}\n\t\treturn true, err\n\t}\n\treturn true, nil\n}\n\nfunc (b *fsBackend) stat(repoID string, objID string) (int64, error) {\n\tpath := path.Join(b.objDir, repoID, objID[:2], objID[2:])\n\tfileInfo, err := os.Stat(path)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\treturn fileInfo.Size(), nil\n}\n"
  },
  {
    "path": "fileserver/objstore/objstore.go",
    "content": "// Package objstore provides operations for commit, fs and block objects.\n// It is low-level package used by commitmgr, fsmgr, blockmgr packages to access storage.\npackage objstore\n\nimport (\n\t\"io\"\n)\n\n// ObjectStore is a container to access storage backend\ntype ObjectStore struct {\n\t// can be \"commit\", \"fs\", or \"block\"\n\tObjType string\n\tbackend storageBackend\n}\n\n// storageBackend is the interface implemented by storage backends.\n// An object store may have one or multiple storage backends.\ntype storageBackend interface {\n\t// Read an object from backend and write the contents into w.\n\tread(repoID string, objID string, w io.Writer) (err error)\n\t// Write the contents from r to the object.\n\twrite(repoID string, objID string, r io.Reader, sync bool) (err error)\n\t// exists checks whether an object exists.\n\texists(repoID string, objID string) (res bool, err error)\n\t// stat calculates an object's size\n\tstat(repoID string, objID string) (res int64, err error)\n}\n\n// New returns a new object store for a given type of objects.\n// objType can be \"commit\", \"fs\", or \"block\".\nfunc New(seafileConfPath string, seafileDataDir string, objType string) *ObjectStore {\n\tobj := new(ObjectStore)\n\tobj.ObjType = objType\n\tobj.backend, _ = newFSBackend(seafileDataDir, objType)\n\treturn obj\n}\n\n// Read data from storage backends.\nfunc (s *ObjectStore) Read(repoID string, objID string, w io.Writer) (err error) {\n\treturn s.backend.read(repoID, objID, w)\n}\n\n// Write data to storage backends.\nfunc (s *ObjectStore) Write(repoID string, objID string, r io.Reader, sync bool) (err error) {\n\treturn s.backend.write(repoID, objID, r, sync)\n}\n\n// Check whether object exists.\nfunc (s *ObjectStore) Exists(repoID string, objID string) (res bool, err error) {\n\treturn s.backend.exists(repoID, objID)\n}\n\n// Stat calculates object size.\nfunc (s *ObjectStore) Stat(repoID string, objID string) (res int64, err error) 
{\n\treturn s.backend.stat(repoID, objID)\n}\n"
  },
  {
    "path": "fileserver/objstore/objstore_test.go",
    "content": "package objstore\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"testing\"\n)\n\nconst (\n\ttestFile        = \"output.data\"\n\tseafileConfPath = \"/tmp/conf\"\n\tseafileDataDir  = \"/tmp/conf/seafile-data\"\n\trepoID          = \"b1f2ad61-9164-418a-a47f-ab805dbd5694\"\n\tobjID           = \"0401fc662e3bc87a41f299a907c056aaf8322a27\"\n)\n\nfunc createFile() error {\n\toutputFile, err := os.OpenFile(testFile, os.O_WRONLY|os.O_CREATE, 0666)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer outputFile.Close()\n\n\toutputString := \"hello world!\\n\"\n\tfor i := 0; i < 10; i++ {\n\t\toutputFile.WriteString(outputString)\n\t}\n\n\treturn nil\n}\n\nfunc delFile() error {\n\terr := os.Remove(testFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = os.RemoveAll(seafileConfPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc TestMain(m *testing.M) {\n\terr := createFile()\n\tif err != nil {\n\t\tfmt.Printf(\"Failed to create test file : %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tcode := m.Run()\n\terr = delFile()\n\tif err != nil {\n\t\tfmt.Printf(\"Failed to remove test file : %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tos.Exit(code)\n}\n\nfunc testWrite(t *testing.T) {\n\tinputFile, err := os.Open(testFile)\n\tif err != nil {\n\t\tt.Errorf(\"Failed to open test file : %v\\n\", err)\n\t}\n\tdefer inputFile.Close()\n\n\tbend := New(seafileConfPath, seafileDataDir, \"commit\")\n\tbend.Write(repoID, objID, inputFile, true)\n}\n\nfunc testRead(t *testing.T) {\n\toutputFile, err := os.OpenFile(testFile, os.O_WRONLY, 0666)\n\tif err != nil {\n\t\tt.Errorf(\"Failed to open test file:%v\\n\", err)\n\t}\n\tdefer outputFile.Close()\n\n\tbend := New(seafileConfPath, seafileDataDir, \"commit\")\n\terr = bend.Read(repoID, objID, outputFile)\n\tif err != nil {\n\t\tt.Errorf(\"Failed to read backend : %s\\n\", err)\n\t}\n}\n\nfunc testExists(t *testing.T) {\n\tbend := New(seafileConfPath, seafileDataDir, \"commit\")\n\tret, _ := bend.Exists(repoID, 
objID)\n\tif !ret {\n\t\tt.Errorf(\"File does not exist\\n\")\n\t}\n\n\tfilePath := path.Join(seafileDataDir, \"storage\", \"commit\", repoID, objID[:2], objID[2:])\n\tfileInfo, _ := os.Stat(filePath)\n\tif fileInfo.Size() != 130 {\n\t\tt.Errorf(\"File exists, but the size of the file is incorrect.\\n\")\n\t}\n}\n\nfunc TestObjStore(t *testing.T) {\n\ttestWrite(t)\n\ttestRead(t)\n\ttestExists(t)\n}\n"
  },
  {
    "path": "fileserver/option/option.go",
    "content": "package option\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\tlog \"github.com/sirupsen/logrus\"\n\t\"gopkg.in/ini.v1\"\n)\n\n// InfiniteQuota indicates that the quota is unlimited.\nconst InfiniteQuota = -2\n\n// Storage unit.\nconst (\n\tKB = 1000\n\tMB = 1000000\n\tGB = 1000000000\n\tTB = 1000000000000\n)\n\nvar (\n\t// fileserver options\n\tHost                   string\n\tPort                   uint32\n\tMaxUploadSize          uint64\n\tFsIdListRequestTimeout int64\n\t// Block size for indexing uploaded files\n\tFixedBlockSize uint64\n\t// Maximum number of goroutines to index uploaded files\n\tMaxIndexingThreads uint32\n\tWebTokenExpireTime uint32\n\t// File mode for temp files\n\tClusterSharedTempFileMode uint32\n\tWindowsEncoding           string\n\tSkipBlockHash             bool\n\tFsCacheLimit              int64\n\tVerifyClientBlocks        bool\n\tMaxIndexingFiles          uint32\n\n\t// general options\n\tCloudMode bool\n\n\t// notification server\n\tEnableNotification bool\n\tNotificationURL    string\n\n\t// GROUP options\n\tGroupTableName string\n\n\t// quota options\n\tDefaultQuota int64\n\n\t// redis options\n\tHasRedisOptions bool\n\tRedisHost       string\n\tRedisPasswd     string\n\tRedisPort       uint32\n\tRedisExpiry     uint32\n\tRedisMaxConn    uint32\n\tRedisTimeout    time.Duration\n\n\t// Profile password\n\tProfilePassword string\n\tEnableProfiling bool\n\n\t// Go log level\n\tLogLevel string\n\n\t// DB default timeout\n\tDBOpTimeout time.Duration\n\n\t// database\n\tDBType string\n\n\t// seahub\n\tSeahubURL     string\n\tJWTPrivateKey string\n\n\t// metric\n\tNodeName string\n)\n\ntype DBOption struct {\n\tUser          string\n\tPassword      string\n\tHost          string\n\tPort          int\n\tCcnetDbName   string\n\tSeafileDbName string\n\tCaPath        string\n\tUseTLS        bool\n\tSkipVerify    bool\n\tCharset       string\n\tDBEngine      string\n}\n\nfunc 
initDefaultOptions() {\n\tHost = \"0.0.0.0\"\n\tPort = 8082\n\tFixedBlockSize = 1 << 23\n\tMaxIndexingThreads = 1\n\tWebTokenExpireTime = 7200\n\tClusterSharedTempFileMode = 0600\n\tDefaultQuota = InfiniteQuota\n\tFsCacheLimit = 4 << 30\n\tVerifyClientBlocks = true\n\tFsIdListRequestTimeout = -1\n\tDBOpTimeout = 60 * time.Second\n\tRedisHost = \"127.0.0.1\"\n\tRedisPort = 6379\n\tRedisExpiry = 24 * 3600\n\tRedisMaxConn = 100\n\tRedisTimeout = 1 * time.Second\n\tMaxIndexingFiles = 10\n}\n\nfunc LoadFileServerOptions(centralDir string) {\n\tinitDefaultOptions()\n\n\tseafileConfPath := filepath.Join(centralDir, \"seafile.conf\")\n\n\topts := ini.LoadOptions{}\n\topts.SpaceBeforeInlineComment = true\n\tconfig, err := ini.LoadSources(opts, seafileConfPath)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to load seafile.conf: %v\", err)\n\t}\n\tCloudMode = false\n\tif section, err := config.GetSection(\"general\"); err == nil {\n\t\tif key, err := section.GetKey(\"cloud_mode\"); err == nil {\n\t\t\tCloudMode, _ = key.Bool()\n\t\t}\n\t}\n\n\tnotifServer := os.Getenv(\"INNER_NOTIFICATION_SERVER_URL\")\n\tenableNotifServer := os.Getenv(\"ENABLE_NOTIFICATION_SERVER\")\n\tif notifServer != \"\" && enableNotifServer == \"true\" {\n\t\tNotificationURL = notifServer\n\t\tEnableNotification = true\n\t}\n\n\tif section, err := config.GetSection(\"httpserver\"); err == nil {\n\t\tparseFileServerSection(section)\n\t}\n\tif section, err := config.GetSection(\"fileserver\"); err == nil {\n\t\tparseFileServerSection(section)\n\t}\n\n\tif section, err := config.GetSection(\"quota\"); err == nil {\n\t\tif key, err := section.GetKey(\"default\"); err == nil {\n\t\t\tquotaStr := key.String()\n\t\t\tDefaultQuota = parseQuota(quotaStr)\n\t\t}\n\t}\n\n\tloadCacheOptionFromEnv()\n\n\tGroupTableName = os.Getenv(\"SEAFILE_MYSQL_DB_GROUP_TABLE_NAME\")\n\tif GroupTableName == \"\" {\n\t\tGroupTableName = \"Group\"\n\t}\n\n\tNodeName = os.Getenv(\"NODE_NAME\")\n\tif NodeName == \"\" {\n\t\tNodeName = 
\"default\"\n\t}\n}\n\nfunc parseFileServerSection(section *ini.Section) {\n\tif key, err := section.GetKey(\"host\"); err == nil {\n\t\tHost = key.String()\n\t}\n\tif key, err := section.GetKey(\"port\"); err == nil {\n\t\tport, err := key.Uint()\n\t\tif err == nil {\n\t\t\tPort = uint32(port)\n\t\t}\n\t}\n\tif key, err := section.GetKey(\"max_upload_size\"); err == nil {\n\t\tsize, err := key.Uint()\n\t\tif err == nil {\n\t\t\tMaxUploadSize = uint64(size) * 1000000\n\t\t}\n\t}\n\tif key, err := section.GetKey(\"max_indexing_threads\"); err == nil {\n\t\tthreads, err := key.Uint()\n\t\tif err == nil {\n\t\t\tMaxIndexingThreads = uint32(threads)\n\t\t}\n\t}\n\tif key, err := section.GetKey(\"fixed_block_size\"); err == nil {\n\t\tblkSize, err := key.Uint64()\n\t\tif err == nil {\n\t\t\tFixedBlockSize = blkSize * (1 << 20)\n\t\t}\n\t}\n\tif key, err := section.GetKey(\"web_token_expire_time\"); err == nil {\n\t\texpire, err := key.Uint()\n\t\tif err == nil {\n\t\t\tWebTokenExpireTime = uint32(expire)\n\t\t}\n\t}\n\tif key, err := section.GetKey(\"cluster_shared_temp_file_mode\"); err == nil {\n\t\tfileMode, err := key.Uint()\n\t\tif err == nil {\n\t\t\tClusterSharedTempFileMode = uint32(fileMode)\n\t\t}\n\t}\n\tif key, err := section.GetKey(\"enable_profiling\"); err == nil {\n\t\tEnableProfiling, _ = key.Bool()\n\t}\n\tif EnableProfiling {\n\t\tif key, err := section.GetKey(\"profile_password\"); err == nil {\n\t\t\tProfilePassword = key.String()\n\t\t} else {\n\t\t\tlog.Fatal(\"password of profiling must be specified.\")\n\t\t}\n\t}\n\tif key, err := section.GetKey(\"go_log_level\"); err == nil {\n\t\tLogLevel = key.String()\n\t}\n\tif key, err := section.GetKey(\"fs_cache_limit\"); err == nil {\n\t\tfsCacheLimit, err := key.Int64()\n\t\tif err == nil {\n\t\t\tFsCacheLimit = fsCacheLimit * 1024 * 1024\n\t\t}\n\t}\n\t// The ratio of physical memory consumption and fs objects is about 4:1,\n\t// and this part of memory is generally not subject to GC. 
So the value is\n\t// divided by 4.\n\tFsCacheLimit = FsCacheLimit / 4\n\tif key, err := section.GetKey(\"fs_id_list_request_timeout\"); err == nil {\n\t\tfsIdListRequestTimeout, err := key.Int64()\n\t\tif err == nil {\n\t\t\tFsIdListRequestTimeout = fsIdListRequestTimeout\n\t\t}\n\t}\n\tif key, err := section.GetKey(\"verify_client_blocks_after_sync\"); err == nil {\n\t\tVerifyClientBlocks, _ = key.Bool()\n\t}\n\tif key, err := section.GetKey(\"max_indexing_files\"); err == nil {\n\t\tthreads, err := key.Uint()\n\t\tif err == nil && threads > 0 {\n\t\t\tMaxIndexingFiles = uint32(threads)\n\t\t}\n\t}\n}\n\nfunc parseQuota(quotaStr string) int64 {\n\tvar quota int64\n\tvar multiplier int64 = GB\n\tif end := strings.Index(quotaStr, \"kb\"); end > 0 {\n\t\tmultiplier = KB\n\t\tquotaInt, err := strconv.ParseInt(quotaStr[:end], 10, 0)\n\t\tif err != nil {\n\t\t\treturn InfiniteQuota\n\t\t}\n\t\tquota = quotaInt * multiplier\n\t} else if end := strings.Index(quotaStr, \"mb\"); end > 0 {\n\t\tmultiplier = MB\n\t\tquotaInt, err := strconv.ParseInt(quotaStr[:end], 10, 0)\n\t\tif err != nil {\n\t\t\treturn InfiniteQuota\n\t\t}\n\t\tquota = quotaInt * multiplier\n\t} else if end := strings.Index(quotaStr, \"gb\"); end > 0 {\n\t\tmultiplier = GB\n\t\tquotaInt, err := strconv.ParseInt(quotaStr[:end], 10, 0)\n\t\tif err != nil {\n\t\t\treturn InfiniteQuota\n\t\t}\n\t\tquota = quotaInt * multiplier\n\t} else if end := strings.Index(quotaStr, \"tb\"); end > 0 {\n\t\tmultiplier = TB\n\t\tquotaInt, err := strconv.ParseInt(quotaStr[:end], 10, 0)\n\t\tif err != nil {\n\t\t\treturn InfiniteQuota\n\t\t}\n\t\tquota = quotaInt * multiplier\n\t} else {\n\t\tquotaInt, err := strconv.ParseInt(quotaStr, 10, 0)\n\t\tif err != nil {\n\t\t\treturn InfiniteQuota\n\t\t}\n\t\tquota = quotaInt * multiplier\n\t}\n\n\treturn quota\n}\n\nfunc loadCacheOptionFromEnv() {\n\tcacheProvider := os.Getenv(\"CACHE_PROVIDER\")\n\tif cacheProvider != \"redis\" {\n\t\treturn\n\t}\n\n\tHasRedisOptions = 
true\n\n\tredisHost := os.Getenv(\"REDIS_HOST\")\n\tif redisHost != \"\" {\n\t\tRedisHost = redisHost\n\t}\n\tredisPort := os.Getenv(\"REDIS_PORT\")\n\tif redisPort != \"\" {\n\t\tport, err := strconv.ParseUint(redisPort, 10, 32)\n\t\tif err == nil {\n\t\t\tRedisPort = uint32(port)\n\t\t}\n\t}\n\tredisPasswd := os.Getenv(\"REDIS_PASSWORD\")\n\tif redisPasswd != \"\" {\n\t\tRedisPasswd = redisPasswd\n\t}\n\tredisMaxConn := os.Getenv(\"REDIS_MAX_CONNECTIONS\")\n\tif redisMaxConn != \"\" {\n\t\tmaxConn, err := strconv.ParseUint(redisMaxConn, 10, 32)\n\t\tif err == nil {\n\t\t\tRedisMaxConn = uint32(maxConn)\n\t\t}\n\t}\n\tredisExpiry := os.Getenv(\"REDIS_EXPIRY\")\n\tif redisExpiry != \"\" {\n\t\texpiry, err := strconv.ParseUint(redisExpiry, 10, 32)\n\t\tif err == nil {\n\t\t\tRedisExpiry = uint32(expiry)\n\t\t}\n\t}\n}\n\nfunc LoadSeahubConfig() error {\n\tJWTPrivateKey = os.Getenv(\"JWT_PRIVATE_KEY\")\n\tif JWTPrivateKey == \"\" {\n\t\treturn fmt.Errorf(\"failed to read JWT_PRIVATE_KEY\")\n\t}\n\n\tsiteRoot := os.Getenv(\"SITE_ROOT\")\n\tif siteRoot != \"\" {\n\t\tSeahubURL = fmt.Sprintf(\"http://127.0.0.1:8000%sapi/v2.1/internal\", siteRoot)\n\t} else {\n\t\tSeahubURL = \"http://127.0.0.1:8000/api/v2.1/internal\"\n\t}\n\n\treturn nil\n}\n\nfunc LoadDBOption(centralDir string) (*DBOption, error) {\n\tdbOpt, err := loadDBOptionFromFile(centralDir)\n\tif err != nil {\n\t\tlog.Warnf(\"failed to load database config: %v\", err)\n\t}\n\tdbOpt = loadDBOptionFromEnv(dbOpt)\n\n\tif dbOpt.Host == \"\" {\n\t\treturn nil, fmt.Errorf(\"no database host in seafile.conf.\")\n\t}\n\tif dbOpt.User == \"\" {\n\t\treturn nil, fmt.Errorf(\"no database user in seafile.conf.\")\n\t}\n\tif dbOpt.Password == \"\" {\n\t\treturn nil, fmt.Errorf(\"no database password in seafile.conf.\")\n\t}\n\n\tDBType = dbOpt.DBEngine\n\n\treturn dbOpt, nil\n}\n\nfunc loadDBOptionFromFile(centralDir string) (*DBOption, error) {\n\tdbOpt := new(DBOption)\n\tdbOpt.DBEngine = \"mysql\"\n\n\tseafileConfPath 
:= filepath.Join(centralDir, \"seafile.conf\")\n\topts := ini.LoadOptions{}\n\topts.SpaceBeforeInlineComment = true\n\tconfig, err := ini.LoadSources(opts, seafileConfPath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to load seafile.conf: %v\", err)\n\t}\n\n\tsection, err := config.GetSection(\"database\")\n\tif err != nil {\n\t\treturn dbOpt, nil\n\t}\n\n\tdbEngine := \"mysql\"\n\tkey, err := section.GetKey(\"type\")\n\tif err == nil {\n\t\tdbEngine = key.String()\n\t}\n\tif dbEngine != \"mysql\" {\n\t\treturn nil, fmt.Errorf(\"unsupported database %s.\", dbEngine)\n\t}\n\tdbOpt.DBEngine = dbEngine\n\tif key, err = section.GetKey(\"host\"); err == nil {\n\t\tdbOpt.Host = key.String()\n\t}\n\t// user is required.\n\tif key, err = section.GetKey(\"user\"); err == nil {\n\t\tdbOpt.User = key.String()\n\t}\n\n\tif key, err = section.GetKey(\"password\"); err == nil {\n\t\tdbOpt.Password = key.String()\n\t}\n\n\tif key, err = section.GetKey(\"db_name\"); err == nil {\n\t\tdbOpt.SeafileDbName = key.String()\n\t}\n\tport := 3306\n\tif key, err = section.GetKey(\"port\"); err == nil {\n\t\tport, _ = key.Int()\n\t}\n\tdbOpt.Port = port\n\tuseTLS := false\n\tif key, err = section.GetKey(\"use_ssl\"); err == nil {\n\t\tuseTLS, _ = key.Bool()\n\t}\n\tdbOpt.UseTLS = useTLS\n\tskipVerify := false\n\tif key, err = section.GetKey(\"skip_verify\"); err == nil {\n\t\tskipVerify, _ = key.Bool()\n\t}\n\tdbOpt.SkipVerify = skipVerify\n\tif key, err = section.GetKey(\"ca_path\"); err == nil {\n\t\tdbOpt.CaPath = key.String()\n\t}\n\tif key, err = section.GetKey(\"connection_charset\"); err == nil {\n\t\tdbOpt.Charset = key.String()\n\t}\n\n\treturn dbOpt, nil\n}\n\nfunc loadDBOptionFromEnv(dbOpt *DBOption) *DBOption {\n\tuser := os.Getenv(\"SEAFILE_MYSQL_DB_USER\")\n\tpassword := os.Getenv(\"SEAFILE_MYSQL_DB_PASSWORD\")\n\thost := os.Getenv(\"SEAFILE_MYSQL_DB_HOST\")\n\tportStr := os.Getenv(\"SEAFILE_MYSQL_DB_PORT\")\n\tccnetDbName := 
os.Getenv(\"SEAFILE_MYSQL_DB_CCNET_DB_NAME\")\n\tseafileDbName := os.Getenv(\"SEAFILE_MYSQL_DB_SEAFILE_DB_NAME\")\n\n\tif dbOpt == nil {\n\t\tdbOpt = new(DBOption)\n\t}\n\tif user != \"\" {\n\t\tdbOpt.User = user\n\t}\n\tif password != \"\" {\n\t\tdbOpt.Password = password\n\t}\n\tif host != \"\" {\n\t\tdbOpt.Host = host\n\t}\n\tif portStr != \"\" {\n\t\tport, _ := strconv.ParseUint(portStr, 10, 32)\n\t\tif port > 0 {\n\t\t\tdbOpt.Port = int(port)\n\t\t}\n\t}\n\tif dbOpt.Port == 0 {\n\t\tdbOpt.Port = 3306\n\t}\n\tif ccnetDbName != \"\" {\n\t\tdbOpt.CcnetDbName = ccnetDbName\n\t} else if dbOpt.CcnetDbName == \"\" {\n\t\tdbOpt.CcnetDbName = \"ccnet_db\"\n\t\tlog.Infof(\"Failed to read SEAFILE_MYSQL_DB_CCNET_DB_NAME, use ccnet_db by default\")\n\t}\n\tif seafileDbName != \"\" {\n\t\tdbOpt.SeafileDbName = seafileDbName\n\t} else if dbOpt.SeafileDbName == \"\" {\n\t\tdbOpt.SeafileDbName = \"seafile_db\"\n\t\tlog.Infof(\"Failed to read SEAFILE_MYSQL_DB_SEAFILE_DB_NAME, use seafile_db by default\")\n\t}\n\treturn dbOpt\n}\n"
  },
  {
    "path": "fileserver/quota.go",
    "content": "package main\n\nimport (\n\t\"context\"\n\t\"database/sql\"\n\t\"fmt\"\n\n\t\"github.com/haiwen/seafile-server/fileserver/option\"\n\t\"github.com/haiwen/seafile-server/fileserver/repomgr\"\n)\n\n// InfiniteQuota indicates that the quota is unlimited.\nconst (\n\tInfiniteQuota = -2\n)\n\nfunc checkQuota(repoID string, delta int64) (int, error) {\n\tif repoID == \"\" {\n\t\terr := fmt.Errorf(\"bad argumets\")\n\t\treturn -1, err\n\t}\n\n\tvInfo, err := repomgr.GetVirtualRepoInfo(repoID)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to get virtual repo: %v\", err)\n\t\treturn -1, err\n\t}\n\trRepoID := repoID\n\tif vInfo != nil {\n\t\trRepoID = vInfo.OriginRepoID\n\t}\n\n\tuser, err := repomgr.GetRepoOwner(rRepoID)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to get repo owner: %v\", err)\n\t\treturn -1, err\n\t}\n\tif user == \"\" {\n\t\terr := fmt.Errorf(\"repo %s has no owner\", repoID)\n\t\treturn -1, err\n\t}\n\tquota, err := getUserQuota(user)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to get user quota: %v\", err)\n\t\treturn -1, err\n\t}\n\n\tif quota == InfiniteQuota {\n\t\treturn 0, nil\n\t}\n\tusage, err := getUserUsage(user)\n\tif err != nil || usage < 0 {\n\t\terr := fmt.Errorf(\"failed to get user usage: %v\", err)\n\t\treturn -1, err\n\t}\n\tusage += delta\n\tif usage >= quota {\n\t\treturn 1, nil\n\t}\n\n\treturn 0, nil\n}\n\nfunc getUserQuota(user string) (int64, error) {\n\tvar quota int64\n\tsqlStr := \"SELECT quota FROM UserQuota WHERE user=?\"\n\tctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)\n\tdefer cancel()\n\trow := seafileDB.QueryRowContext(ctx, sqlStr, user)\n\tif err := row.Scan(&quota); err != nil {\n\t\tif err != sql.ErrNoRows {\n\t\t\treturn -1, err\n\t\t}\n\t}\n\n\tif quota <= 0 {\n\t\tquota = option.DefaultQuota\n\t}\n\n\treturn quota, nil\n}\n\nfunc getUserUsage(user string) (int64, error) {\n\tvar usage sql.NullInt64\n\tsqlStr := \"SELECT SUM(size) FROM \" 
+\n\t\t\"RepoOwner o LEFT JOIN VirtualRepo v ON o.repo_id=v.repo_id, \" +\n\t\t\"RepoSize WHERE \" +\n\t\t\"owner_id=? AND o.repo_id=RepoSize.repo_id \" +\n\t\t\"AND v.repo_id IS NULL\"\n\n\tctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)\n\tdefer cancel()\n\trow := seafileDB.QueryRowContext(ctx, sqlStr, user)\n\tif err := row.Scan(&usage); err != nil {\n\t\tif err != sql.ErrNoRows {\n\t\t\treturn -1, err\n\t\t}\n\t}\n\n\tif usage.Valid {\n\t\treturn usage.Int64, nil\n\t}\n\n\treturn 0, nil\n}\n"
  },
  {
    "path": "fileserver/repomgr/repomgr.go",
    "content": "// Package repomgr manages repo objects and file operations in repos.\npackage repomgr\n\nimport (\n\t\"context\"\n\t\"database/sql\"\n\t\"fmt\"\n\t\"time\"\n\n\t// Change to non-blank imports when use\n\t_ \"github.com/haiwen/seafile-server/fileserver/blockmgr\"\n\t\"github.com/haiwen/seafile-server/fileserver/commitmgr\"\n\t\"github.com/haiwen/seafile-server/fileserver/option\"\n\tlog \"github.com/sirupsen/logrus\"\n)\n\n// Repo status\nconst (\n\tRepoStatusNormal = iota\n\tRepoStatusReadOnly\n\tNRepoStatus\n)\n\n// Repo contains information about a repo.\ntype Repo struct {\n\tID                   string\n\tName                 string\n\tDesc                 string\n\tLastModifier         string\n\tLastModificationTime int64\n\tHeadCommitID         string\n\tRootID               string\n\tIsCorrupted          bool\n\n\t// Set when repo is virtual\n\tVirtualInfo *VRepoInfo\n\n\t// ID for fs and block store\n\tStoreID string\n\n\t// Encrypted repo info\n\tIsEncrypted   bool\n\tEncVersion    int\n\tMagic         string\n\tRandomKey     string\n\tSalt          string\n\tPwdHash       string\n\tPwdHashAlgo   string\n\tPwdHashParams string\n\tVersion       int\n}\n\n// VRepoInfo contains virtual repo information.\ntype VRepoInfo struct {\n\tRepoID       string\n\tOriginRepoID string\n\tPath         string\n\tBaseCommitID string\n}\n\nvar seafileDB *sql.DB\n\n// Init initialize status of repomgr package\nfunc Init(seafDB *sql.DB) {\n\tseafileDB = seafDB\n}\n\n// Get returns Repo object by repo ID.\nfunc Get(id string) *Repo {\n\tquery := `SELECT r.repo_id, b.commit_id, v.origin_repo, v.path, v.base_commit FROM ` +\n\t\t`Repo r LEFT JOIN Branch b ON r.repo_id = b.repo_id ` +\n\t\t`LEFT JOIN VirtualRepo v ON r.repo_id = v.repo_id ` +\n\t\t`WHERE r.repo_id = ? 
AND b.name = 'master'`\n\n\tctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)\n\tdefer cancel()\n\tstmt, err := seafileDB.PrepareContext(ctx, query)\n\tif err != nil {\n\t\tlog.Errorf(\"failed to prepare sql : %s ：%v\", query, err)\n\t\treturn nil\n\t}\n\tdefer stmt.Close()\n\n\trows, err := stmt.QueryContext(ctx, id)\n\tif err != nil {\n\t\tlog.Errorf(\"failed to query sql : %v\", err)\n\t\treturn nil\n\t}\n\tdefer rows.Close()\n\n\trepo := new(Repo)\n\n\tvar originRepoID sql.NullString\n\tvar path sql.NullString\n\tvar baseCommitID sql.NullString\n\tif rows.Next() {\n\t\terr := rows.Scan(&repo.ID, &repo.HeadCommitID, &originRepoID, &path, &baseCommitID)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to scan sql rows : %v\", err)\n\t\t\treturn nil\n\t\t}\n\t} else {\n\t\treturn nil\n\t}\n\n\tif repo.HeadCommitID == \"\" {\n\t\tlog.Errorf(\"repo %s is corrupted\", id)\n\t\treturn nil\n\t}\n\n\tif originRepoID.Valid {\n\t\trepo.VirtualInfo = new(VRepoInfo)\n\t\trepo.VirtualInfo.RepoID = id\n\t\trepo.VirtualInfo.OriginRepoID = originRepoID.String\n\t\trepo.StoreID = originRepoID.String\n\n\t\tif path.Valid {\n\t\t\trepo.VirtualInfo.Path = path.String\n\t\t}\n\n\t\tif baseCommitID.Valid {\n\t\t\trepo.VirtualInfo.BaseCommitID = baseCommitID.String\n\t\t}\n\t} else {\n\t\trepo.StoreID = repo.ID\n\t}\n\n\tcommit, err := commitmgr.Load(repo.ID, repo.HeadCommitID)\n\tif err != nil {\n\t\tlog.Errorf(\"failed to load commit %s/%s : %v\", repo.ID, repo.HeadCommitID, err)\n\t\treturn nil\n\t}\n\n\trepo.Name = commit.RepoName\n\trepo.Desc = commit.RepoDesc\n\trepo.LastModifier = commit.CreatorName\n\trepo.LastModificationTime = commit.Ctime\n\trepo.RootID = commit.RootID\n\trepo.Version = commit.Version\n\tif commit.Encrypted == \"true\" {\n\t\trepo.IsEncrypted = true\n\t\trepo.EncVersion = commit.EncVersion\n\t\tif repo.EncVersion == 1 && commit.PwdHash == \"\" {\n\t\t\trepo.Magic = commit.Magic\n\t\t} else if repo.EncVersion == 2 
{\n\t\t\trepo.RandomKey = commit.RandomKey\n\t\t} else if repo.EncVersion == 3 {\n\t\t\trepo.RandomKey = commit.RandomKey\n\t\t\trepo.Salt = commit.Salt\n\t\t} else if repo.EncVersion == 4 {\n\t\t\trepo.RandomKey = commit.RandomKey\n\t\t\trepo.Salt = commit.Salt\n\t\t}\n\t\tif repo.EncVersion >= 2 && commit.PwdHash == \"\" {\n\t\t\trepo.Magic = commit.Magic\n\t\t}\n\t\tif commit.PwdHash != \"\" {\n\t\t\trepo.PwdHash = commit.PwdHash\n\t\t\trepo.PwdHashAlgo = commit.PwdHashAlgo\n\t\t\trepo.PwdHashParams = commit.PwdHashParams\n\t\t}\n\t}\n\n\treturn repo\n}\n\n// RepoToCommit converts Repo to Commit.\nfunc RepoToCommit(repo *Repo, commit *commitmgr.Commit) {\n\tcommit.RepoID = repo.ID\n\tcommit.RepoName = repo.Name\n\tif repo.IsEncrypted {\n\t\tcommit.Encrypted = \"true\"\n\t\tcommit.EncVersion = repo.EncVersion\n\t\tif repo.EncVersion == 1 && repo.PwdHash == \"\" {\n\t\t\tcommit.Magic = repo.Magic\n\t\t} else if repo.EncVersion == 2 {\n\t\t\tcommit.RandomKey = repo.RandomKey\n\t\t} else if repo.EncVersion == 3 {\n\t\t\tcommit.RandomKey = repo.RandomKey\n\t\t\tcommit.Salt = repo.Salt\n\t\t} else if repo.EncVersion == 4 {\n\t\t\tcommit.RandomKey = repo.RandomKey\n\t\t\tcommit.Salt = repo.Salt\n\t\t}\n\t\tif repo.EncVersion >= 2 && repo.PwdHash == \"\" {\n\t\t\tcommit.Magic = repo.Magic\n\t\t}\n\t\tif repo.PwdHash != \"\" {\n\t\t\tcommit.PwdHash = repo.PwdHash\n\t\t\tcommit.PwdHashAlgo = repo.PwdHashAlgo\n\t\t\tcommit.PwdHashParams = repo.PwdHashParams\n\t\t}\n\t} else {\n\t\tcommit.Encrypted = \"false\"\n\t}\n\tcommit.Version = repo.Version\n}\n\n// GetEx return repo object even if it's corrupted.\nfunc GetEx(id string) *Repo {\n\trepo := new(Repo)\n\tquery := `SELECT r.repo_id, b.commit_id, v.origin_repo, v.path, v.base_commit FROM ` +\n\t\t`Repo r LEFT JOIN Branch b ON r.repo_id = b.repo_id ` +\n\t\t`LEFT JOIN VirtualRepo v ON r.repo_id = v.repo_id ` +\n\t\t`WHERE r.repo_id = ? 
AND b.name = 'master'`\n\n\tctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)\n\tdefer cancel()\n\tstmt, err := seafileDB.PrepareContext(ctx, query)\n\tif err != nil {\n\t\trepo.IsCorrupted = true\n\t\treturn repo\n\t}\n\tdefer stmt.Close()\n\n\trows, err := stmt.QueryContext(ctx, id)\n\tif err != nil {\n\t\trepo.IsCorrupted = true\n\t\treturn repo\n\t}\n\tdefer rows.Close()\n\n\tvar originRepoID sql.NullString\n\tvar path sql.NullString\n\tvar baseCommitID sql.NullString\n\tif rows.Next() {\n\t\terr := rows.Scan(&repo.ID, &repo.HeadCommitID, &originRepoID, &path, &baseCommitID)\n\t\tif err != nil {\n\t\t\trepo.IsCorrupted = true\n\t\t\treturn repo\n\n\t\t}\n\t} else if rows.Err() != nil {\n\t\trepo.IsCorrupted = true\n\t\treturn repo\n\t} else {\n\t\treturn nil\n\t}\n\tif originRepoID.Valid {\n\t\trepo.VirtualInfo = new(VRepoInfo)\n\t\trepo.VirtualInfo.RepoID = id\n\t\trepo.VirtualInfo.OriginRepoID = originRepoID.String\n\t\trepo.StoreID = originRepoID.String\n\n\t\tif path.Valid {\n\t\t\trepo.VirtualInfo.Path = path.String\n\t\t}\n\n\t\tif baseCommitID.Valid {\n\t\t\trepo.VirtualInfo.BaseCommitID = baseCommitID.String\n\t\t}\n\t} else {\n\t\trepo.StoreID = repo.ID\n\t}\n\n\tif repo.HeadCommitID == \"\" {\n\t\trepo.IsCorrupted = true\n\t\treturn repo\n\t}\n\n\tcommit, err := commitmgr.Load(repo.ID, repo.HeadCommitID)\n\tif err != nil {\n\t\tlog.Errorf(\"failed to load commit %s/%s : %v\", repo.ID, repo.HeadCommitID, err)\n\t\trepo.IsCorrupted = true\n\t\treturn repo\n\t}\n\n\trepo.Name = commit.RepoName\n\trepo.LastModifier = commit.CreatorName\n\trepo.LastModificationTime = commit.Ctime\n\trepo.RootID = commit.RootID\n\trepo.Version = commit.Version\n\tif commit.Encrypted == \"true\" {\n\t\trepo.IsEncrypted = true\n\t\trepo.EncVersion = commit.EncVersion\n\t\tif repo.EncVersion == 1 {\n\t\t\trepo.Magic = commit.Magic\n\t\t} else if repo.EncVersion == 2 {\n\t\t\trepo.Magic = commit.Magic\n\t\t\trepo.RandomKey = commit.RandomKey\n\t\t} 
else if repo.EncVersion == 3 {\n\t\t\trepo.Magic = commit.Magic\n\t\t\trepo.RandomKey = commit.RandomKey\n\t\t\trepo.Salt = commit.Salt\n\t\t} else if repo.EncVersion == 4 {\n\t\t\trepo.Magic = commit.Magic\n\t\t\trepo.RandomKey = commit.RandomKey\n\t\t\trepo.Salt = commit.Salt\n\t\t}\n\t\tif commit.PwdHash != \"\" {\n\t\t\trepo.PwdHash = commit.PwdHash\n\t\t\trepo.PwdHashAlgo = commit.PwdHashAlgo\n\t\t\trepo.PwdHashParams = commit.PwdHashParams\n\t\t}\n\t}\n\n\treturn repo\n}\n\n// GetVirtualRepoInfo return virtual repo info by repo id.\nfunc GetVirtualRepoInfo(repoID string) (*VRepoInfo, error) {\n\tsqlStr := \"SELECT repo_id, origin_repo, path, base_commit FROM VirtualRepo WHERE repo_id = ?\"\n\tvRepoInfo := new(VRepoInfo)\n\n\tctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)\n\tdefer cancel()\n\trow := seafileDB.QueryRowContext(ctx, sqlStr, repoID)\n\tif err := row.Scan(&vRepoInfo.RepoID, &vRepoInfo.OriginRepoID, &vRepoInfo.Path, &vRepoInfo.BaseCommitID); err != nil {\n\t\tif err != sql.ErrNoRows {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, nil\n\t}\n\treturn vRepoInfo, nil\n}\n\n// GetVirtualRepoInfoByOrigin return virtual repo info by origin repo id.\nfunc GetVirtualRepoInfoByOrigin(originRepo string) ([]*VRepoInfo, error) {\n\tsqlStr := \"SELECT repo_id, origin_repo, path, base_commit \" +\n\t\t\"FROM VirtualRepo WHERE origin_repo=?\"\n\tvar vRepos []*VRepoInfo\n\tctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)\n\tdefer cancel()\n\trow, err := seafileDB.QueryContext(ctx, sqlStr, originRepo)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer row.Close()\n\tfor row.Next() {\n\t\tvRepoInfo := new(VRepoInfo)\n\t\tif err := row.Scan(&vRepoInfo.RepoID, &vRepoInfo.OriginRepoID, &vRepoInfo.Path, &vRepoInfo.BaseCommitID); err != nil {\n\t\t\tif err != sql.ErrNoRows {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\tvRepos = append(vRepos, vRepoInfo)\n\t}\n\n\treturn vRepos, nil\n}\n\n// GetEmailByToken 
return user's email by token.\nfunc GetEmailByToken(repoID string, token string) (string, error) {\n\tvar email string\n\tsqlStr := \"SELECT email FROM RepoUserToken WHERE repo_id = ? AND token = ?\"\n\n\tctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)\n\tdefer cancel()\n\trow := seafileDB.QueryRowContext(ctx, sqlStr, repoID, token)\n\tif err := row.Scan(&email); err != nil {\n\t\tif err != sql.ErrNoRows {\n\t\t\treturn email, err\n\t\t}\n\t}\n\treturn email, nil\n}\n\n// GetRepoStatus return repo status by repo id.\nfunc GetRepoStatus(repoID string) (int, error) {\n\tvar status int = -1\n\n\t// First, check origin repo's status.\n\tsqlStr := \"SELECT i.status FROM VirtualRepo v LEFT JOIN RepoInfo i \" +\n\t\t\"ON i.repo_id=v.origin_repo WHERE v.repo_id=? \" +\n\t\t\"AND i.repo_id IS NOT NULL\"\n\n\tctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)\n\tdefer cancel()\n\trow := seafileDB.QueryRowContext(ctx, sqlStr, repoID)\n\tif err := row.Scan(&status); err != nil {\n\t\tif err != sql.ErrNoRows {\n\t\t\treturn status, err\n\t\t} else {\n\t\t\tstatus = -1\n\t\t}\n\t}\n\tif status >= 0 {\n\t\treturn status, nil\n\t}\n\n\t// Then, check repo's own status.\n\tsqlStr = \"SELECT status FROM RepoInfo WHERE repo_id=?\"\n\trow = seafileDB.QueryRowContext(ctx, sqlStr, repoID)\n\tif err := row.Scan(&status); err != nil {\n\t\tif err != sql.ErrNoRows {\n\t\t\treturn status, err\n\t\t}\n\t}\n\treturn status, nil\n}\n\n// TokenPeerInfoExists check if the token exists.\nfunc TokenPeerInfoExists(token string) (bool, error) {\n\tvar exists string\n\tsqlStr := \"SELECT token FROM RepoTokenPeerInfo WHERE token=?\"\n\n\tctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)\n\tdefer cancel()\n\trow := seafileDB.QueryRowContext(ctx, sqlStr, token)\n\tif err := row.Scan(&exists); err != nil {\n\t\tif err != sql.ErrNoRows {\n\t\t\treturn false, err\n\t\t}\n\t\treturn false, nil\n\t}\n\treturn true, nil\n}\n\n// 
AddTokenPeerInfo adds token peer info to RepoTokenPeerInfo table.\nfunc AddTokenPeerInfo(token, peerID, peerIP, peerName, clientVer string, syncTime int64) error {\n\tsqlStr := \"INSERT INTO RepoTokenPeerInfo (token, peer_id, peer_ip, peer_name, sync_time, client_ver)\" +\n\t\t\"VALUES (?, ?, ?, ?, ?, ?)\"\n\n\tctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)\n\tdefer cancel()\n\tif _, err := seafileDB.ExecContext(ctx, sqlStr, token, peerID, peerIP, peerName, syncTime, clientVer); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// UpdateTokenPeerInfo updates token peer info to RepoTokenPeerInfo table.\nfunc UpdateTokenPeerInfo(token, peerID, clientVer string, syncTime int64) error {\n\tsqlStr := \"UPDATE RepoTokenPeerInfo SET \" +\n\t\t\"peer_ip=?, sync_time=?, client_ver=? WHERE token=?\"\n\tctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)\n\tdefer cancel()\n\tif _, err := seafileDB.ExecContext(ctx, sqlStr, peerID, syncTime, clientVer, token); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// GetUploadTmpFile gets the tmp file path of upload file.\nfunc GetUploadTmpFile(repoID, filePath string) (string, error) {\n\tvar filePathNoSlash string\n\tif filePath[0] == '/' {\n\t\tfilePathNoSlash = filePath[1:]\n\t} else {\n\t\tfilePathNoSlash = filePath\n\t\tfilePath = \"/\" + filePath\n\t}\n\n\tvar tmpFile string\n\tsqlStr := \"SELECT tmp_file_path FROM WebUploadTempFiles WHERE repo_id = ? 
AND file_path = ?\"\n\n\tctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)\n\tdefer cancel()\n\trow := seafileDB.QueryRowContext(ctx, sqlStr, repoID, filePath)\n\tif err := row.Scan(&tmpFile); err != nil {\n\t\tif err != sql.ErrNoRows {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\tif tmpFile == \"\" {\n\t\trow := seafileDB.QueryRowContext(ctx, sqlStr, repoID, filePathNoSlash)\n\t\tif err := row.Scan(&tmpFile); err != nil {\n\t\t\tif err != sql.ErrNoRows {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn tmpFile, nil\n}\n\n// AddUploadTmpFile adds the tmp file path of upload file.\nfunc AddUploadTmpFile(repoID, filePath, tmpFile string) error {\n\tif filePath[0] != '/' {\n\t\tfilePath = \"/\" + filePath\n\t}\n\n\tsqlStr := \"INSERT INTO WebUploadTempFiles (repo_id, file_path, tmp_file_path) VALUES (?, ?, ?)\"\n\n\tctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)\n\tdefer cancel()\n\t_, err := seafileDB.ExecContext(ctx, sqlStr, repoID, filePath, tmpFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// DelUploadTmpFile deletes the tmp file path of upload file.\nfunc DelUploadTmpFile(repoID, filePath string) error {\n\tvar filePathNoSlash string\n\tif filePath[0] == '/' {\n\t\tfilePathNoSlash = filePath[1:]\n\t} else {\n\t\tfilePathNoSlash = filePath\n\t\tfilePath = \"/\" + filePath\n\t}\n\n\tsqlStr := \"DELETE FROM WebUploadTempFiles WHERE repo_id = ? 
AND file_path IN (?, ?)\"\n\n\tctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)\n\tdefer cancel()\n\t_, err := seafileDB.ExecContext(ctx, sqlStr, repoID, filePath, filePathNoSlash)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc setRepoCommitToDb(repoID, repoName string, updateTime int64, version int, isEncrypted string, lastModifier string) error {\n\tvar exists int\n\tvar encrypted int\n\n\tsqlStr := \"SELECT 1 FROM RepoInfo WHERE repo_id=?\"\n\tctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)\n\tdefer cancel()\n\trow := seafileDB.QueryRowContext(ctx, sqlStr, repoID)\n\tif err := row.Scan(&exists); err != nil {\n\t\tif err != sql.ErrNoRows {\n\t\t\treturn err\n\t\t}\n\t}\n\tif updateTime == 0 {\n\t\tupdateTime = time.Now().Unix()\n\t}\n\n\tif isEncrypted == \"true\" {\n\t\tencrypted = 1\n\t}\n\n\tif exists == 1 {\n\t\tsqlStr := \"UPDATE RepoInfo SET name=?, update_time=?, version=?, is_encrypted=?, \" +\n\t\t\t\"last_modifier=? WHERE repo_id=?\"\n\t\tif _, err := seafileDB.ExecContext(ctx, sqlStr, repoName, updateTime, version, encrypted, lastModifier, repoID); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tsqlStr := \"INSERT INTO RepoInfo (repo_id, name, update_time, version, is_encrypted, last_modifier) \" +\n\t\t\t\"VALUES (?, ?, ?, ?, ?, ?)\"\n\t\tif _, err := seafileDB.ExecContext(ctx, sqlStr, repoID, repoName, updateTime, version, encrypted, lastModifier); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// SetVirtualRepoBaseCommitPath updates the table of VirtualRepo.\nfunc SetVirtualRepoBaseCommitPath(repoID, baseCommitID, newPath string) error {\n\tsqlStr := \"UPDATE VirtualRepo SET base_commit=?, path=? 
WHERE repo_id=?\"\n\tctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)\n\tdefer cancel()\n\tif _, err := seafileDB.ExecContext(ctx, sqlStr, baseCommitID, newPath, repoID); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// GetVirtualRepoIDsByOrigin return the virtual repo ids by origin repo id.\nfunc GetVirtualRepoIDsByOrigin(repoID string) ([]string, error) {\n\tsqlStr := \"SELECT repo_id FROM VirtualRepo WHERE origin_repo=?\"\n\n\tvar id string\n\tvar ids []string\n\tctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)\n\tdefer cancel()\n\trow, err := seafileDB.QueryContext(ctx, sqlStr, repoID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer row.Close()\n\tfor row.Next() {\n\t\tif err := row.Scan(&id); err != nil {\n\t\t\tif err != sql.ErrNoRows {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\tids = append(ids, id)\n\t}\n\n\treturn ids, nil\n}\n\n// DelVirtualRepo deletes virtual repo from database.\nfunc DelVirtualRepo(repoID string, cloudMode bool) error {\n\terr := removeVirtualRepoOndisk(repoID, cloudMode)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to remove virtual repo on disk: %v\", err)\n\t\treturn err\n\t}\n\tsqlStr := \"DELETE FROM VirtualRepo WHERE repo_id = ?\"\n\tctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)\n\tdefer cancel()\n\t_, err = seafileDB.ExecContext(ctx, sqlStr, repoID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc removeVirtualRepoOndisk(repoID string, cloudMode bool) error {\n\tsqlStr := \"DELETE FROM Repo WHERE repo_id = ?\"\n\tctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)\n\tdefer cancel()\n\t_, err := seafileDB.ExecContext(ctx, sqlStr, repoID)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsqlStr = \"SELECT name, repo_id, commit_id FROM Branch WHERE repo_id=?\"\n\trows, err := seafileDB.QueryContext(ctx, sqlStr, repoID)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer rows.Close()\n\tfor 
rows.Next() {\n\t\tvar name, id, commitID string\n\t\tif err := rows.Scan(&name, &id, &commitID); err != nil {\n\t\t\tif err != sql.ErrNoRows {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tsqlStr := \"DELETE FROM RepoHead WHERE branch_name = ? AND repo_id = ?\"\n\t\t_, err := seafileDB.ExecContext(ctx, sqlStr, name, id)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsqlStr = \"DELETE FROM Branch WHERE name=? AND repo_id=?\"\n\t\t_, err = seafileDB.ExecContext(ctx, sqlStr, name, id)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tsqlStr = \"DELETE FROM RepoOwner WHERE repo_id = ?\"\n\t_, err = seafileDB.ExecContext(ctx, sqlStr, repoID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsqlStr = \"DELETE FROM SharedRepo WHERE repo_id = ?\"\n\t_, err = seafileDB.ExecContext(ctx, sqlStr, repoID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsqlStr = \"DELETE FROM RepoGroup WHERE repo_id = ?\"\n\t_, err = seafileDB.ExecContext(ctx, sqlStr, repoID)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !cloudMode {\n\t\tsqlStr = \"DELETE FROM InnerPubRepo WHERE repo_id = ?\"\n\t\t_, err := seafileDB.ExecContext(ctx, sqlStr, repoID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tsqlStr = \"DELETE FROM RepoUserToken WHERE repo_id = ?\"\n\t_, err = seafileDB.ExecContext(ctx, sqlStr, repoID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsqlStr = \"DELETE FROM RepoValidSince WHERE repo_id = ?\"\n\t_, err = seafileDB.ExecContext(ctx, sqlStr, repoID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsqlStr = \"DELETE FROM RepoSize WHERE repo_id = ?\"\n\t_, err = seafileDB.ExecContext(ctx, sqlStr, repoID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar exists int\n\tsqlStr = \"SELECT 1 FROM GarbageRepos WHERE repo_id=?\"\n\trow := seafileDB.QueryRowContext(ctx, sqlStr, repoID)\n\tif err := row.Scan(&exists); err != nil {\n\t\tif err != sql.ErrNoRows {\n\t\t\treturn err\n\t\t}\n\t}\n\tif exists == 0 {\n\t\tsqlStr = \"INSERT INTO GarbageRepos (repo_id) VALUES (?)\"\n\t\t_, err := 
seafileDB.ExecContext(ctx, sqlStr, repoID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tsqlStr = \"REPLACE INTO GarbageRepos (repo_id) VALUES (?)\"\n\t\t_, err := seafileDB.ExecContext(ctx, sqlStr, repoID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// IsVirtualRepo check if the repo is a virtual reop.\nfunc IsVirtualRepo(repoID string) (bool, error) {\n\tvar exists int\n\tsqlStr := \"SELECT 1 FROM VirtualRepo WHERE repo_id = ?\"\n\n\tctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)\n\tdefer cancel()\n\trow := seafileDB.QueryRowContext(ctx, sqlStr, repoID)\n\tif err := row.Scan(&exists); err != nil {\n\t\tif err != sql.ErrNoRows {\n\t\t\treturn false, err\n\t\t}\n\t\treturn false, nil\n\t}\n\treturn true, nil\n\n}\n\n// GetRepoOwner get the owner of repo.\nfunc GetRepoOwner(repoID string) (string, error) {\n\tvar owner string\n\tsqlStr := \"SELECT owner_id FROM RepoOwner WHERE repo_id=?\"\n\n\tctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)\n\tdefer cancel()\n\trow := seafileDB.QueryRowContext(ctx, sqlStr, repoID)\n\tif err := row.Scan(&owner); err != nil {\n\t\tif err != sql.ErrNoRows {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\treturn owner, nil\n}\n\nfunc UpdateRepoInfo(repoID, commitID string) error {\n\thead, err := commitmgr.Load(repoID, commitID)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to get commit %s:%s\", repoID, commitID)\n\t\treturn err\n\t}\n\n\tsetRepoCommitToDb(repoID, head.RepoName, head.Ctime, head.Version, head.Encrypted, head.CreatorName)\n\n\treturn nil\n}\n\nfunc HasLastGCID(repoID, clientID string) (bool, error) {\n\tsqlStr := \"SELECT 1 FROM LastGCID WHERE repo_id = ? 
AND client_id = ?\"\n\n\tvar exist int\n\tctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)\n\tdefer cancel()\n\trow := seafileDB.QueryRowContext(ctx, sqlStr, repoID, clientID)\n\tif err := row.Scan(&exist); err != nil {\n\t\tif err != sql.ErrNoRows {\n\t\t\treturn false, err\n\t\t}\n\t}\n\tif exist == 0 {\n\t\treturn false, nil\n\t}\n\treturn true, nil\n}\n\nfunc GetLastGCID(repoID, clientID string) (string, error) {\n\tsqlStr := \"SELECT gc_id FROM LastGCID WHERE repo_id = ? AND client_id = ?\"\n\n\tvar gcID sql.NullString\n\tctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)\n\tdefer cancel()\n\trow := seafileDB.QueryRowContext(ctx, sqlStr, repoID, clientID)\n\tif err := row.Scan(&gcID); err != nil {\n\t\tif err != sql.ErrNoRows {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\treturn gcID.String, nil\n}\n\nfunc GetCurrentGCID(repoID string) (string, error) {\n\tsqlStr := \"SELECT gc_id FROM GCID WHERE repo_id = ?\"\n\n\tvar gcID sql.NullString\n\tctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)\n\tdefer cancel()\n\trow := seafileDB.QueryRowContext(ctx, sqlStr, repoID)\n\tif err := row.Scan(&gcID); err != nil {\n\t\tif err != sql.ErrNoRows {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\treturn gcID.String, nil\n}\n\nfunc RemoveLastGCID(repoID, clientID string) error {\n\tsqlStr := \"DELETE FROM LastGCID WHERE repo_id = ? AND client_id = ?\"\n\tctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)\n\tdefer cancel()\n\tif _, err := seafileDB.ExecContext(ctx, sqlStr, repoID, clientID); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc SetLastGCID(repoID, clientID, gcID string) error {\n\texist, err := HasLastGCID(repoID, clientID)\n\tif err != nil {\n\t\treturn err\n\t}\n\tctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)\n\tdefer cancel()\n\tif exist {\n\t\tsqlStr := \"UPDATE LastGCID SET gc_id = ? WHERE repo_id = ? 
AND client_id = ?\"\n\t\tif _, err = seafileDB.ExecContext(ctx, sqlStr, gcID, repoID, clientID); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tsqlStr := \"INSERT INTO LastGCID (repo_id, client_id, gc_id) VALUES (?, ?, ?)\"\n\t\tif _, err = seafileDB.ExecContext(ctx, sqlStr, repoID, clientID, gcID); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "fileserver/repomgr/repomgr_test.go",
    "content": "package repomgr\n\nimport (\n\t\"database/sql\"\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n\n\t_ \"github.com/go-sql-driver/mysql\"\n\t\"github.com/haiwen/seafile-server/fileserver/commitmgr\"\n\t\"github.com/haiwen/seafile-server/fileserver/searpc\"\n)\n\nconst (\n\t//\trepoID          = \"9646f13e-bbab-4eaf-9a84-fb6e1cd776b3\"\n\tuser            = \"seafile\"\n\tpassword        = \"seafile\"\n\thost            = \"127.0.0.1\"\n\tport            = 3306\n\tdbName          = \"seafile-db\"\n\tuseTLS          = false\n\tseafileConfPath = \"/root/conf\"\n\tseafileDataDir  = \"/root/conf/seafile-data\"\n\trepoName        = \"repo\"\n\tuserName        = \"seafile@seafile.com\"\n\tencVersion      = 2\n\tpipePath        = \"/root/runtime/seafile.sock\"\n\tservice         = \"seafserv-threaded-rpcserver\"\n)\n\nvar repoID string\nvar client *searpc.Client\n\nfunc createRepo() string {\n\tid, err := client.Call(\"seafile_create_repo\", repoName, \"\", userName, nil, encVersion)\n\tif err != nil {\n\t\tfmt.Printf(\"failed to create repo.\\n\")\n\t}\n\tif id == nil {\n\t\tfmt.Printf(\"repo id is nil.\\n\")\n\t\tos.Exit(1)\n\t}\n\n\trepoid, ok := id.(string)\n\tif !ok {\n\t\tfmt.Printf(\"returned value isn't repo id.\\n\")\n\t}\n\treturn repoid\n}\n\nfunc delRepo() {\n\t_, err := client.Call(\"seafile_destroy_repo\", repoID)\n\tif err != nil {\n\t\tfmt.Printf(\"failed to del repo.\\n\")\n\t\tos.Exit(1)\n\t}\n}\n\nfunc TestMain(m *testing.M) {\n\tclient = searpc.Init(pipePath, service, 10)\n\trepoID = createRepo()\n\tdsn := fmt.Sprintf(\"%s:%s@tcp(%s:%d)/%s?tls=%t\", user, password, host, port, dbName, useTLS)\n\tseafDB, err := sql.Open(\"mysql\", dsn)\n\tif err != nil {\n\t\tfmt.Printf(\"Failed to open database: %v\", err)\n\t}\n\tInit(seafDB)\n\tcommitmgr.Init(seafileConfPath, seafileDataDir)\n\tcode := m.Run()\n\tdelRepo()\n\tos.Exit(code)\n}\n\nfunc TestGet(t *testing.T) {\n\trepo := Get(repoID)\n\tif repo == nil {\n\t\tt.Errorf(\"failed to get repo : %s.\\n\", 
repoID)\n\t\tt.FailNow()\n\t}\n\n\tif repo.ID != repoID {\n\t\tt.Errorf(\"failed to get repo : %s.\\n\", repoID)\n\t}\n}\n"
  },
  {
    "path": "fileserver/searpc/searpc.go",
    "content": "// Package searpc implements searpc client protocol with unix pipe transport.\npackage searpc\n\nimport (\n\t\"bufio\"\n\t\"encoding/binary\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n)\n\n// Client represents a connections to the RPC server.\ntype Client struct {\n\t// path of the named pipe\n\tpipePath string\n\t// RPC service name\n\tService string\n\n\tpool    chan *net.UnixConn\n\tmaxConn int\n}\n\ntype request struct {\n\tService string `json:\"service\"`\n\tRequest string `json:\"request\"`\n}\n\n// Init initializes rpc client.\nfunc Init(pipePath string, service string, maxConn int) *Client {\n\tclient := new(Client)\n\tclient.pipePath = pipePath\n\tclient.Service = service\n\n\tclient.maxConn = maxConn\n\tclient.pool = make(chan *net.UnixConn, maxConn)\n\n\treturn client\n}\n\n// Call calls the RPC function funcname with variadic parameters.\n// The return value of the RPC function is return as interface{} type\n// The true returned type can be int32, int64, string, struct (object), list of struct (objects) or JSON\nfunc (c *Client) Call(funcname string, params ...interface{}) (interface{}, error) {\n\t// TODO: use reflection to compose requests and parse results.\n\n\tconn, err := c.getConn()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\thasErr := false\n\tdefer func() {\n\t\tif hasErr {\n\t\t\tconn.Close()\n\t\t} else {\n\t\t\tc.returnConn(conn)\n\t\t}\n\t}()\n\n\tvar req []interface{}\n\treq = append(req, funcname)\n\treq = append(req, params...)\n\tjsonstr, err := json.Marshal(req)\n\tif err != nil {\n\t\thasErr = true\n\t\terr := fmt.Errorf(\"failed to encode rpc call to json : %v\", err)\n\t\treturn nil, err\n\t}\n\n\treqHeader := new(request)\n\treqHeader.Service = c.Service\n\treqHeader.Request = string(jsonstr)\n\n\tjsonstr, err = json.Marshal(reqHeader)\n\tif err != nil {\n\t\thasErr = true\n\t\terr := fmt.Errorf(\"failed to convert object to json : %v\", err)\n\t\treturn nil, err\n\t}\n\n\theader := make([]byte, 
4)\n\tbinary.LittleEndian.PutUint32(header, uint32(len(jsonstr)))\n\t_, err = conn.Write([]byte(header))\n\tif err != nil {\n\t\thasErr = true\n\t\terr := fmt.Errorf(\"Failed to write rpc request header : %v\", err)\n\t\treturn nil, err\n\t}\n\n\t_, err = conn.Write([]byte(jsonstr))\n\tif err != nil {\n\t\thasErr = true\n\t\terr := fmt.Errorf(\"Failed to write rpc request body : %v\", err)\n\t\treturn nil, err\n\t}\n\n\treader := bufio.NewReader(conn)\n\tbuflen := make([]byte, 4)\n\t_, err = io.ReadFull(reader, buflen)\n\tif err != nil {\n\t\thasErr = true\n\t\terr := fmt.Errorf(\"failed to read response header from rpc server : %v\", err)\n\t\treturn nil, err\n\t}\n\tretlen := binary.LittleEndian.Uint32(buflen)\n\n\tmsg := make([]byte, retlen)\n\t_, err = io.ReadFull(reader, msg)\n\tif err != nil {\n\t\thasErr = true\n\t\terr := fmt.Errorf(\"failed to read response body from rpc server : %v\", err)\n\t\treturn nil, err\n\t}\n\n\tretlist := make(map[string]interface{})\n\terr = json.Unmarshal(msg, &retlist)\n\tif err != nil {\n\t\thasErr = true\n\t\terr := fmt.Errorf(\"failed to decode rpc response : %v\", err)\n\t\treturn nil, err\n\t}\n\n\tif _, ok := retlist[\"err_code\"]; ok {\n\t\thasErr = true\n\t\terr := fmt.Errorf(\"searpc server returned error : %v\", retlist[\"err_msg\"])\n\t\treturn nil, err\n\t}\n\n\tif _, ok := retlist[\"ret\"]; ok {\n\t\tret := retlist[\"ret\"]\n\t\treturn ret, nil\n\t}\n\n\thasErr = true\n\terr = fmt.Errorf(\"No value returned\")\n\treturn nil, err\n}\n\nfunc (c *Client) getConn() (*net.UnixConn, error) {\n\tselect {\n\tcase conn := <-c.pool:\n\t\treturn conn, nil\n\tdefault:\n\t\tunixAddr, err := net.ResolveUnixAddr(\"unix\", c.pipePath)\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"failed to resolve unix addr when calling rpc : %w\", err)\n\t\t\treturn nil, err\n\t\t}\n\t\tconn, err := net.DialUnix(\"unix\", nil, unixAddr)\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"failed to dial unix when calling rpc : %v\", 
err)\n\t\t\treturn nil, err\n\t\t}\n\t\treturn conn, nil\n\t}\n}\n\nfunc (c *Client) returnConn(conn *net.UnixConn) {\n\tselect {\n\tcase c.pool <- conn:\n\tdefault:\n\t\tconn.Close()\n\t}\n}\n"
  },
  {
    "path": "fileserver/searpc/searpc_test.go",
    "content": "package searpc\n\nimport (\n\t\"os\"\n\t\"testing\"\n)\n\nconst (\n\trepoName   = \"repo\"\n\tuserName   = \"seafile@seafile.com\"\n\tencVersion = 2\n\tpipePath   = \"/root/runtime/seafile.sock\"\n\tservice    = \"seafserv-threaded-rpcserver\"\n)\n\nvar client *Client\n\nfunc TestMain(m *testing.M) {\n\tclient = Init(pipePath, service, 10)\n\tcode := m.Run()\n\tos.Exit(code)\n}\n\nfunc TestCallRpc(t *testing.T) {\n\trepoID, err := client.Call(\"seafile_create_repo\", repoName, \"\", userName, nil, encVersion)\n\tif err != nil {\n\t\tt.Errorf(\"failed to create repo.\\n\")\n\t}\n\tif repoID == nil {\n\t\tt.Errorf(\"repo id is nil.\\n\")\n\t\tt.FailNow()\n\t}\n\n\trepo, err := client.Call(\"seafile_get_repo\", repoID)\n\tif err != nil {\n\t\tt.Errorf(\"failed to get repo.\\n\")\n\t}\n\tif repo == nil {\n\t\tt.Errorf(\"repo is nil.\\n\")\n\t\tt.FailNow()\n\t}\n\trepoMap, ok := repo.(map[string]interface{})\n\tif !ok {\n\t\tt.Errorf(\"failed to assert the type.\\n\")\n\t\tt.FailNow()\n\t}\n\tif repoMap[\"id\"] != repoID {\n\t\tt.Errorf(\"wrong repo id.\\n\")\n\t}\n\n\trepoList, err := client.Call(\"seafile_get_repo_list\", -1, -1, \"\")\n\tif err != nil {\n\t\tt.Errorf(\"failed to get repo list.\\n\")\n\t}\n\tif repoList == nil {\n\t\tt.Errorf(\"repo list is nil.\\n\")\n\t\tt.FailNow()\n\t}\n\tvar exists bool\n\trepos, ok := repoList.([]interface{})\n\tif !ok {\n\t\tt.Errorf(\"failed to assert the type.\\n\")\n\t\tt.FailNow()\n\t}\n\tfor _, v := range repos {\n\t\trepo, ok := v.(map[string]interface{})\n\t\tif !ok {\n\t\t\tt.Errorf(\"failed to assert the type.\\n\")\n\t\t\tt.FailNow()\n\t\t}\n\t\tif repo[\"id\"] == repoID {\n\t\t\texists = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif exists != true {\n\t\tt.Errorf(\"can't find repo %s in repo list.\\n\", repoID)\n\t}\n\n\tclient.Call(\"seafile_destroy_repo\", repoID)\n}\n"
  },
  {
    "path": "fileserver/share/group/group.go",
    "content": "// Package group manages group membership and group shares.\npackage group\n"
  },
  {
    "path": "fileserver/share/public/public.go",
    "content": "// Package public manager inner public shares.\npackage public\n"
  },
  {
    "path": "fileserver/share/share.go",
    "content": "// Package share manages share relations.\n// share: manages personal shares and provide high level permission check functions.\npackage share\n\nimport (\n\t\"context\"\n\t\"database/sql\"\n\t\"fmt\"\n\t\"path/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com/haiwen/seafile-server/fileserver/option\"\n\t\"github.com/haiwen/seafile-server/fileserver/repomgr\"\n\tlog \"github.com/sirupsen/logrus\"\n)\n\ntype group struct {\n\tid            int\n\tgroupName     string\n\tcreatorName   string\n\ttimestamp     int64\n\tparentGroupID int\n}\n\nvar ccnetDB *sql.DB\nvar seafileDB *sql.DB\nvar groupTableName string\nvar cloudMode bool\n\n// Init ccnetDB, seafileDB, groupTableName, cloudMode\nfunc Init(cnDB *sql.DB, seafDB *sql.DB, grpTableName string, clMode bool) {\n\tccnetDB = cnDB\n\tseafileDB = seafDB\n\tgroupTableName = grpTableName\n\tcloudMode = clMode\n}\n\n// CheckPerm get user's repo permission\nfunc CheckPerm(repoID string, user string) string {\n\tvar perm string\n\tvInfo, err := repomgr.GetVirtualRepoInfo(repoID)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to get virtual repo info by repo id %s: %v\", repoID, err)\n\t}\n\tif vInfo != nil {\n\t\tperm = checkVirtualRepoPerm(repoID, vInfo.OriginRepoID, user, vInfo.Path)\n\t\treturn perm\n\t}\n\n\tperm = checkRepoSharePerm(repoID, user)\n\n\treturn perm\n}\n\nfunc checkVirtualRepoPerm(repoID, originRepoID, user, vPath string) string {\n\towner, err := repomgr.GetRepoOwner(originRepoID)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to get repo owner: %v\", err)\n\t}\n\tvar perm string\n\tif owner != \"\" && owner == user {\n\t\tperm = \"rw\"\n\t\treturn perm\n\t}\n\tperm = checkPermOnParentRepo(originRepoID, user, vPath)\n\tif perm != \"\" {\n\t\treturn perm\n\t}\n\tperm = checkRepoSharePerm(originRepoID, user)\n\treturn perm\n}\n\nfunc getUserGroups(sqlStr string, args ...interface{}) ([]group, error) {\n\tctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)\n\tdefer 
cancel()\n\trows, err := ccnetDB.QueryContext(ctx, sqlStr, args...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer rows.Close()\n\n\tvar groups []group\n\tvar g group\n\tfor rows.Next() {\n\t\tif err := rows.Scan(&g.id, &g.groupName,\n\t\t\t&g.creatorName, &g.timestamp,\n\t\t\t&g.parentGroupID); err == nil {\n\n\t\t\tgroups = append(groups, g)\n\t\t}\n\t}\n\n\tif err := rows.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn groups, nil\n}\n\nfunc getGroupsByUser(userName string, returnAncestors bool) ([]group, error) {\n\tsqlStr := fmt.Sprintf(\"SELECT g.group_id, group_name, creator_name, timestamp, parent_group_id FROM \"+\n\t\t\"`%s` g, GroupUser u WHERE g.group_id = u.group_id AND user_name=? ORDER BY g.group_id DESC\",\n\t\tgroupTableName)\n\tgroups, err := getUserGroups(sqlStr, userName)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"Failed to get groups by user %s: %v\", userName, err)\n\t\treturn nil, err\n\t}\n\tif !returnAncestors {\n\t\treturn groups, nil\n\t}\n\n\tsqlStr = \"\"\n\tvar ret []group\n\tfor _, group := range groups {\n\t\tparentGroupID := group.parentGroupID\n\t\tgroupID := group.id\n\t\tif parentGroupID != 0 {\n\t\t\tif sqlStr == \"\" {\n\t\t\t\tsqlStr = fmt.Sprintf(\"SELECT path FROM GroupStructure WHERE group_id IN (%d\",\n\t\t\t\t\tgroupID)\n\t\t\t} else {\n\t\t\t\tsqlStr += fmt.Sprintf(\", %d\", groupID)\n\t\t\t}\n\t\t} else {\n\t\t\tret = append(ret, group)\n\t\t}\n\t}\n\tif sqlStr != \"\" {\n\t\tsqlStr += \")\"\n\t\tpaths, err := getGroupPaths(sqlStr)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Failed to get group paths: %v\", err)\n\t\t}\n\t\tif paths == \"\" {\n\t\t\terr := fmt.Errorf(\"Failed to get groups path for user %s\", userName)\n\t\t\treturn nil, err\n\t\t}\n\n\t\tsqlStr = fmt.Sprintf(\"SELECT g.group_id, group_name, creator_name, timestamp, parent_group_id FROM \"+\n\t\t\t\"`%s` g WHERE g.group_id IN (%s) ORDER BY g.group_id DESC\",\n\t\t\tgroupTableName, paths)\n\t\tgroups, err := getUserGroups(sqlStr)\n\t\tif 
err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tret = append(ret, groups...)\n\t}\n\treturn ret, nil\n}\n\nfunc getGroupPaths(sqlStr string) (string, error) {\n\tvar paths string\n\tctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)\n\tdefer cancel()\n\trows, err := ccnetDB.QueryContext(ctx, sqlStr)\n\tif err != nil {\n\t\treturn paths, err\n\t}\n\n\tdefer rows.Close()\n\n\tvar path string\n\tfor rows.Next() {\n\t\trows.Scan(&path)\n\t\tif paths == \"\" {\n\t\t\tpaths = path\n\t\t} else {\n\t\t\tpaths += fmt.Sprintf(\", %s\", path)\n\t\t}\n\t}\n\n\tif err := rows.Err(); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn paths, nil\n}\n\nfunc checkGroupPermByUser(repoID string, userName string) (string, error) {\n\tgroups, err := getGroupsByUser(userName, false)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif len(groups) == 0 {\n\t\treturn \"\", nil\n\t}\n\n\tvar sqlBuilder strings.Builder\n\tsqlBuilder.WriteString(\"SELECT permission FROM RepoGroup WHERE repo_id = ? 
AND group_id IN (\")\n\tfor i := 0; i < len(groups); i++ {\n\t\tsqlBuilder.WriteString(strconv.Itoa(groups[i].id))\n\t\tif i+1 < len(groups) {\n\t\t\tsqlBuilder.WriteString(\",\")\n\t\t}\n\t}\n\tsqlBuilder.WriteString(\")\")\n\n\tctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)\n\tdefer cancel()\n\trows, err := seafileDB.QueryContext(ctx, sqlBuilder.String(), repoID)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"Failed to get group permission by user %s: %v\", userName, err)\n\t\treturn \"\", err\n\t}\n\n\tdefer rows.Close()\n\n\tvar perm string\n\tvar origPerm string\n\tfor rows.Next() {\n\t\tif err := rows.Scan(&perm); err == nil {\n\t\t\tif perm == \"rw\" {\n\t\t\t\torigPerm = perm\n\t\t\t} else if perm == \"r\" && origPerm == \"\" {\n\t\t\t\torigPerm = perm\n\t\t\t}\n\t\t}\n\t}\n\n\tif err := rows.Err(); err != nil {\n\t\terr := fmt.Errorf(\"Failed to get group permission for user %s: %v\", userName, err)\n\t\treturn \"\", err\n\t}\n\n\treturn origPerm, nil\n}\n\nfunc checkSharedRepoPerm(repoID string, email string) (string, error) {\n\tsqlStr := \"SELECT permission FROM SharedRepo WHERE repo_id=? 
AND to_email=?\"\n\tctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)\n\tdefer cancel()\n\trow := seafileDB.QueryRowContext(ctx, sqlStr, repoID, email)\n\n\tvar perm string\n\tif err := row.Scan(&perm); err != nil {\n\t\tif err != sql.ErrNoRows {\n\t\t\terr := fmt.Errorf(\"Failed to check shared repo permission: %v\", err)\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\treturn perm, nil\n}\n\nfunc checkInnerPubRepoPerm(repoID string) (string, error) {\n\tsqlStr := \"SELECT permission FROM InnerPubRepo WHERE repo_id=?\"\n\tctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)\n\tdefer cancel()\n\trow := seafileDB.QueryRowContext(ctx, sqlStr, repoID)\n\n\tvar perm string\n\tif err := row.Scan(&perm); err != nil {\n\t\tif err != sql.ErrNoRows {\n\t\t\terr := fmt.Errorf(\"Failed to check inner public repo permission: %v\", err)\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\treturn perm, nil\n}\n\nfunc checkRepoSharePerm(repoID string, userName string) string {\n\towner, err := repomgr.GetRepoOwner(repoID)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to get repo owner: %v\", err)\n\t}\n\tif owner != \"\" && owner == userName {\n\t\tperm := \"rw\"\n\t\treturn perm\n\t}\n\tperm, err := checkSharedRepoPerm(repoID, userName)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to get shared repo permission: %v\", err)\n\t}\n\tif perm != \"\" {\n\t\treturn perm\n\t}\n\tperm, err = checkGroupPermByUser(repoID, userName)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to get group permission by user %s: %v\", userName, err)\n\t}\n\tif perm != \"\" {\n\t\treturn perm\n\t}\n\tif !cloudMode {\n\t\tperm, err = checkInnerPubRepoPerm(repoID)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Failed to get inner pulic repo permission by repo id %s: %v\", repoID, err)\n\t\t\treturn \"\"\n\t\t}\n\t\treturn perm\n\t}\n\treturn \"\"\n}\n\nfunc getSharedDirsToUser(originRepoID string, toEmail string) (map[string]string, error) {\n\tdirs := make(map[string]string)\n\tsqlStr := 
\"SELECT v.path, s.permission FROM SharedRepo s, VirtualRepo v WHERE \" +\n\t\t\"s.repo_id = v.repo_id AND s.to_email = ? AND v.origin_repo = ?\"\n\n\tctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)\n\tdefer cancel()\n\trows, err := seafileDB.QueryContext(ctx, sqlStr, toEmail, originRepoID)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"Failed to get shared directories by user %s: %v\", toEmail, err)\n\t\treturn nil, err\n\t}\n\n\tdefer rows.Close()\n\n\tvar path string\n\tvar perm string\n\tfor rows.Next() {\n\t\tif err := rows.Scan(&path, &perm); err == nil {\n\t\t\tdirs[path] = perm\n\t\t}\n\t}\n\tif err := rows.Err(); err != nil {\n\t\terr := fmt.Errorf(\"Failed to get shared directories by user %s: %v\", toEmail, err)\n\t\treturn nil, err\n\t}\n\n\treturn dirs, nil\n}\n\nfunc getDirPerm(perms map[string]string, path string) string {\n\ttmp := path\n\tvar perm string\n\t// If the path is empty, filepath.Dir returns \".\". If the path consists entirely of separators,\n\t// filepath.Dir returns a single separator.\n\tfor tmp != \"/\" && tmp != \".\" && tmp != \"\" {\n\t\tif perm, exists := perms[tmp]; exists {\n\t\t\treturn perm\n\t\t}\n\t\ttmp = filepath.Dir(tmp)\n\t}\n\treturn perm\n}\n\nfunc convertGroupListToStr(groups []group) string {\n\tvar groupIDs strings.Builder\n\n\tfor i, group := range groups {\n\t\tgroupIDs.WriteString(strconv.Itoa(group.id))\n\t\tif i+1 < len(groups) {\n\t\t\tgroupIDs.WriteString(\",\")\n\t\t}\n\t}\n\treturn groupIDs.String()\n}\n\nfunc getSharedDirsToGroup(originRepoID string, groups []group) (map[string]string, error) {\n\tdirs := make(map[string]string)\n\tgroupIDs := convertGroupListToStr(groups)\n\n\tsqlStr := fmt.Sprintf(\"SELECT v.path, s.permission \"+\n\t\t\"FROM RepoGroup s, VirtualRepo v WHERE \"+\n\t\t\"s.repo_id = v.repo_id AND v.origin_repo = ? 
\"+\n\t\t\"AND s.group_id in (%s)\", groupIDs)\n\n\tctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)\n\tdefer cancel()\n\trows, err := seafileDB.QueryContext(ctx, sqlStr, originRepoID)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"Failed to get shared directories: %v\", err)\n\t\treturn nil, err\n\t}\n\n\tdefer rows.Close()\n\n\tvar path string\n\tvar perm string\n\tfor rows.Next() {\n\t\tif err := rows.Scan(&path, &perm); err == nil {\n\t\t\tdirs[path] = perm\n\t\t}\n\t}\n\n\tif err := rows.Err(); err != nil {\n\t\terr := fmt.Errorf(\"Failed to get shared directories: %v\", err)\n\t\treturn nil, err\n\t}\n\n\treturn dirs, nil\n}\n\nfunc checkPermOnParentRepo(originRepoID, user, vPath string) string {\n\tvar perm string\n\tuserPerms, err := getSharedDirsToUser(originRepoID, user)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to get all shared folder perms in parent repo %.8s for user %s\", originRepoID, user)\n\t\treturn \"\"\n\t}\n\tif len(userPerms) > 0 {\n\t\tperm = getDirPerm(userPerms, vPath)\n\t\tif perm != \"\" {\n\t\t\treturn perm\n\t\t}\n\t}\n\n\tgroups, err := getGroupsByUser(user, false)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to get groups by user %s: %v\", user, err)\n\t}\n\tif len(groups) == 0 {\n\t\treturn perm\n\t}\n\n\tgroupPerms, err := getSharedDirsToGroup(originRepoID, groups)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to get all shared folder perm from parent repo %.8s to all user groups\", originRepoID)\n\t\treturn \"\"\n\t}\n\tif len(groupPerms) == 0 {\n\t\treturn \"\"\n\t}\n\n\tperm = getDirPerm(groupPerms, vPath)\n\n\treturn perm\n}\n\n// SharedRepo is a shared repo object\ntype SharedRepo struct {\n\tVersion      int    `json:\"version\"`\n\tID           string `json:\"id\"`\n\tHeadCommitID string `json:\"head_commit_id\"`\n\tName         string `json:\"name\"`\n\tMTime        int64  `json:\"mtime\"`\n\tPermission   string `json:\"permission\"`\n\tType         string `json:\"type\"`\n\tOwner        string 
`json:\"owner\"`\n\tRepoType     string `json:\"-\"`\n}\n\n// GetReposByOwner get repos by owner\nfunc GetReposByOwner(email string) ([]*SharedRepo, error) {\n\tvar repos []*SharedRepo\n\n\tquery := \"SELECT o.repo_id, b.commit_id, i.name, \" +\n\t\t\"i.version, i.update_time, i.last_modifier, i.type FROM \" +\n\t\t\"RepoOwner o LEFT JOIN Branch b ON o.repo_id = b.repo_id \" +\n\t\t\"LEFT JOIN RepoInfo i ON o.repo_id = i.repo_id \" +\n\t\t\"LEFT JOIN VirtualRepo v ON o.repo_id = v.repo_id \" +\n\t\t\"WHERE owner_id=? AND \" +\n\t\t\"v.repo_id IS NULL \" +\n\t\t\"ORDER BY i.update_time DESC, o.repo_id\"\n\n\tctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)\n\tdefer cancel()\n\tstmt, err := seafileDB.PrepareContext(ctx, query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer stmt.Close()\n\n\trows, err := stmt.QueryContext(ctx, email)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer rows.Close()\n\n\tfor rows.Next() {\n\t\trepo := new(SharedRepo)\n\t\tvar repoName, lastModifier, repoType sql.NullString\n\t\tif err := rows.Scan(&repo.ID, &repo.HeadCommitID,\n\t\t\t&repoName, &repo.Version, &repo.MTime,\n\t\t\t&lastModifier, &repoType); err == nil {\n\n\t\t\tif repo.HeadCommitID == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !repoName.Valid || !lastModifier.Valid {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif repoName.String == \"\" || lastModifier.String == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\trepo.Name = repoName.String\n\t\t\tif repoType.Valid {\n\t\t\t\trepo.RepoType = repoType.String\n\t\t\t}\n\t\t\trepos = append(repos, repo)\n\t\t}\n\t}\n\n\tif err := rows.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn repos, nil\n}\n\n// ListInnerPubRepos get inner public repos\nfunc ListInnerPubRepos() ([]*SharedRepo, error) {\n\tquery := \"SELECT InnerPubRepo.repo_id, \" +\n\t\t\"owner_id, permission, commit_id, i.name, \" +\n\t\t\"i.update_time, i.version, i.type \" +\n\t\t\"FROM InnerPubRepo \" +\n\t\t\"LEFT JOIN RepoInfo i 
ON InnerPubRepo.repo_id = i.repo_id, RepoOwner, Branch \" +\n\t\t\"WHERE InnerPubRepo.repo_id=RepoOwner.repo_id AND \" +\n\t\t\"InnerPubRepo.repo_id = Branch.repo_id AND Branch.name = 'master'\"\n\n\tctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)\n\tdefer cancel()\n\tstmt, err := seafileDB.PrepareContext(ctx, query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer stmt.Close()\n\n\trows, err := stmt.QueryContext(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer rows.Close()\n\n\tvar repos []*SharedRepo\n\tfor rows.Next() {\n\t\trepo := new(SharedRepo)\n\t\tvar repoName, repoType sql.NullString\n\t\tif err := rows.Scan(&repo.ID, &repo.Owner,\n\t\t\t&repo.Permission, &repo.HeadCommitID, &repoName,\n\t\t\t&repo.MTime, &repo.Version, &repoType); err == nil {\n\n\t\t\tif !repoName.Valid {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif repoName.String == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\trepo.Name = repoName.String\n\t\t\tif repoType.Valid {\n\t\t\t\trepo.RepoType = repoType.String\n\t\t\t}\n\t\t\trepos = append(repos, repo)\n\t\t}\n\t}\n\n\tif err := rows.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn repos, nil\n}\n\n// ListShareRepos list share repos by email\nfunc ListShareRepos(email, columnType string) ([]*SharedRepo, error) {\n\tvar repos []*SharedRepo\n\tvar query string\n\tif columnType == \"from_email\" {\n\t\tquery = \"SELECT sh.repo_id, to_email, \" +\n\t\t\t\"permission, commit_id, \" +\n\t\t\t\"i.name, i.update_time, i.version, i.type FROM \" +\n\t\t\t\"SharedRepo sh LEFT JOIN RepoInfo i ON sh.repo_id = i.repo_id, Branch b \" +\n\t\t\t\"WHERE from_email=? 
AND \" +\n\t\t\t\"sh.repo_id = b.repo_id AND \" +\n\t\t\t\"b.name = 'master' \" +\n\t\t\t\"ORDER BY i.update_time DESC, sh.repo_id\"\n\t} else if columnType == \"to_email\" {\n\t\tquery = \"SELECT sh.repo_id, from_email, \" +\n\t\t\t\"permission, commit_id, \" +\n\t\t\t\"i.name, i.update_time, i.version, i.type FROM \" +\n\t\t\t\"SharedRepo sh LEFT JOIN RepoInfo i ON sh.repo_id = i.repo_id, Branch b \" +\n\t\t\t\"WHERE to_email=? AND \" +\n\t\t\t\"sh.repo_id = b.repo_id AND \" +\n\t\t\t\"b.name = 'master' \" +\n\t\t\t\"ORDER BY i.update_time DESC, sh.repo_id\"\n\t} else {\n\t\terr := fmt.Errorf(\"Wrong column type: %s\", columnType)\n\t\treturn nil, err\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)\n\tdefer cancel()\n\tstmt, err := seafileDB.PrepareContext(ctx, query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer stmt.Close()\n\n\trows, err := stmt.QueryContext(ctx, email)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer rows.Close()\n\n\tfor rows.Next() {\n\t\trepo := new(SharedRepo)\n\t\tvar repoName, repoType sql.NullString\n\t\tif err := rows.Scan(&repo.ID, &repo.Owner,\n\t\t\t&repo.Permission, &repo.HeadCommitID,\n\t\t\t&repoName, &repo.MTime, &repo.Version, &repoType); err == nil {\n\n\t\t\tif !repoName.Valid {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif repoName.String == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\trepo.Name = repoName.String\n\t\t\tif repoType.Valid {\n\t\t\t\trepo.RepoType = repoType.String\n\t\t\t}\n\n\t\t\trepos = append(repos, repo)\n\t\t}\n\t}\n\n\tif err := rows.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn repos, nil\n}\n\n// GetGroupReposByUser get group repos by user\nfunc GetGroupReposByUser(user string, orgID int) ([]*SharedRepo, error) {\n\tgroups, err := getGroupsByUser(user, true)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(groups) == 0 {\n\t\treturn nil, nil\n\t}\n\n\tvar sqlBuilder strings.Builder\n\tif orgID < 0 {\n\t\tsqlBuilder.WriteString(\"SELECT 
g.repo_id, \" +\n\t\t\t\"user_name, permission, commit_id, \" +\n\t\t\t\"i.name, i.update_time, i.version, i.type \" +\n\t\t\t\"FROM RepoGroup g \" +\n\t\t\t\"LEFT JOIN RepoInfo i ON g.repo_id = i.repo_id, \" +\n\t\t\t\"Branch b WHERE g.repo_id = b.repo_id AND \" +\n\t\t\t\"b.name = 'master' AND group_id IN (\")\n\t} else {\n\t\tsqlBuilder.WriteString(\"SELECT g.repo_id, \" +\n\t\t\t\"owner, permission, commit_id, \" +\n\t\t\t\"i.name, i.update_time, i.version, i.type \" +\n\t\t\t\"FROM OrgGroupRepo g \" +\n\t\t\t\"LEFT JOIN RepoInfo i ON g.repo_id = i.repo_id, \" +\n\t\t\t\"Branch b WHERE g.repo_id = b.repo_id AND \" +\n\t\t\t\"b.name = 'master' AND group_id IN (\")\n\t}\n\n\tfor i := 0; i < len(groups); i++ {\n\t\tsqlBuilder.WriteString(strconv.Itoa(groups[i].id))\n\t\tif i+1 < len(groups) {\n\t\t\tsqlBuilder.WriteString(\",\")\n\t\t}\n\t}\n\tsqlBuilder.WriteString(\" ) ORDER BY group_id\")\n\n\tctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)\n\tdefer cancel()\n\trows, err := seafileDB.QueryContext(ctx, sqlBuilder.String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer rows.Close()\n\n\tvar repos []*SharedRepo\n\tfor rows.Next() {\n\t\tgRepo := new(SharedRepo)\n\t\tvar repoType sql.NullString\n\t\tif err := rows.Scan(&gRepo.ID, &gRepo.Owner,\n\t\t\t&gRepo.Permission, &gRepo.HeadCommitID,\n\t\t\t&gRepo.Name, &gRepo.MTime, &gRepo.Version, &repoType); err == nil {\n\t\t\tif repoType.Valid {\n\t\t\t\tgRepo.RepoType = repoType.String\n\t\t\t}\n\t\t\trepos = append(repos, gRepo)\n\t\t}\n\t}\n\n\tif err := rows.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn repos, nil\n}\n"
  },
  {
    "path": "fileserver/size_sched.go",
    "content": "package main\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"path/filepath\"\n\t\"time\"\n\n\t\"gopkg.in/ini.v1\"\n\n\t\"database/sql\"\n\n\t\"github.com/go-redis/redis/v8\"\n\t\"github.com/haiwen/seafile-server/fileserver/commitmgr\"\n\t\"github.com/haiwen/seafile-server/fileserver/diff\"\n\t\"github.com/haiwen/seafile-server/fileserver/fsmgr\"\n\t\"github.com/haiwen/seafile-server/fileserver/option\"\n\t\"github.com/haiwen/seafile-server/fileserver/repomgr\"\n\t\"github.com/haiwen/seafile-server/fileserver/workerpool\"\n\n\tlog \"github.com/sirupsen/logrus\"\n)\n\nconst (\n\tRepoSizeList = \"repo_size_task\"\n)\n\nvar updateSizePool *workerpool.WorkPool\nvar redisClient *redis.Client\n\nfunc sizeSchedulerInit() {\n\tvar n int = 1\n\tvar seafileConfPath string\n\tif centralDir != \"\" {\n\t\tseafileConfPath = filepath.Join(centralDir, \"seafile.conf\")\n\t} else {\n\t\tseafileConfPath = filepath.Join(absDataDir, \"seafile.conf\")\n\t}\n\tconfig, err := ini.Load(seafileConfPath)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to load seafile.conf: %v\", err)\n\t}\n\tif section, err := config.GetSection(\"scheduler\"); err == nil {\n\t\tif key, err := section.GetKey(\"size_sched_thread_num\"); err == nil {\n\t\t\tnum, err := key.Int()\n\t\t\tif err == nil {\n\t\t\t\tn = num\n\t\t\t}\n\t\t}\n\t}\n\tupdateSizePool = workerpool.CreateWorkerPool(computeRepoSize, n)\n\n\tserver := fmt.Sprintf(\"%s:%d\", option.RedisHost, option.RedisPort)\n\topt := &redis.Options{\n\t\tAddr:     server,\n\t\tPassword: option.RedisPasswd,\n\t}\n\topt.PoolSize = n\n\n\tredisClient = redis.NewClient(opt)\n\n}\n\nfunc computeRepoSize(args ...interface{}) error {\n\tif len(args) < 1 {\n\t\treturn nil\n\t}\n\trepoID := args[0].(string)\n\tvar size int64\n\tvar fileCount int64\n\n\trepo := repomgr.Get(repoID)\n\tif repo == nil {\n\t\terr := fmt.Errorf(\"failed to get repo %s\", repoID)\n\t\treturn err\n\t}\n\n\tinfo, err := getOldRepoInfo(repoID)\n\tif err != nil 
{\n\t\terr := fmt.Errorf(\"failed to get old repo info: %v\", err)\n\t\treturn err\n\t}\n\n\tif info != nil && info.HeadID == repo.HeadCommitID {\n\t\treturn nil\n\t}\n\n\thead, err := commitmgr.Load(repo.ID, repo.HeadCommitID)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to get head commit %s\", repo.HeadCommitID)\n\t\treturn err\n\t}\n\n\tvar oldHead *commitmgr.Commit\n\tif info != nil {\n\t\tcommit, _ := commitmgr.Load(repo.ID, info.HeadID)\n\t\toldHead = commit\n\t}\n\n\tif info != nil && oldHead != nil {\n\t\tvar results []*diff.DiffEntry\n\t\tvar changeSize int64\n\t\tvar changeFileCount int64\n\t\terr := diff.DiffCommits(oldHead, head, &results, false)\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"failed to do diff commits: %v\", err)\n\t\t\treturn err\n\t\t}\n\n\t\tfor _, de := range results {\n\t\t\tif de.Status == diff.DiffStatusDeleted {\n\t\t\t\tchangeSize -= de.Size\n\t\t\t\tchangeFileCount--\n\t\t\t} else if de.Status == diff.DiffStatusAdded {\n\t\t\t\tchangeSize += de.Size\n\t\t\t\tchangeFileCount++\n\t\t\t} else if de.Status == diff.DiffStatusModified {\n\t\t\t\tchangeSize = changeSize + de.Size - de.OriginSize\n\t\t\t}\n\t\t}\n\t\tsize = info.Size + changeSize\n\t\tfileCount = info.FileCount + changeFileCount\n\t} else {\n\t\tinfo, err := fsmgr.GetFileCountInfoByPath(repo.StoreID, repo.RootID, \"/\")\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"failed to get file count\")\n\t\t\treturn err\n\t\t}\n\n\t\tfileCount = info.FileCount\n\t\tsize = info.Size\n\t}\n\n\terr = setRepoSizeAndFileCount(repoID, repo.HeadCommitID, size, fileCount)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to set repo size and file count %s: %v\", repoID, err)\n\t\treturn err\n\t}\n\n\terr = notifyRepoSizeChange(repo.StoreID)\n\tif err != nil {\n\t\tlog.Warnf(\"Failed to notify repo size change for repo %s: %v\", repoID, err)\n\t}\n\n\treturn nil\n}\n\nfunc setRepoSizeAndFileCount(repoID, newHeadID string, size, fileCount int64) error {\n\tctx, cancel := 
context.WithTimeout(context.Background(), option.DBOpTimeout)\n\tdefer cancel()\n\ttrans, err := seafileDB.BeginTx(ctx, nil)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to start transaction: %v\", err)\n\t\treturn err\n\t}\n\n\tvar headID string\n\tsqlStr := \"SELECT head_id FROM RepoSize WHERE repo_id=?\"\n\n\trow := trans.QueryRowContext(ctx, sqlStr, repoID)\n\tif err := row.Scan(&headID); err != nil {\n\t\tif err != sql.ErrNoRows {\n\t\t\ttrans.Rollback()\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif headID == \"\" {\n\t\tsqlStr := \"INSERT INTO RepoSize (repo_id, size, head_id) VALUES (?, ?, ?)\"\n\t\t_, err = trans.ExecContext(ctx, sqlStr, repoID, size, newHeadID)\n\t\tif err != nil {\n\t\t\ttrans.Rollback()\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tsqlStr = \"UPDATE RepoSize SET size = ?, head_id = ? WHERE repo_id = ?\"\n\t\t_, err = trans.ExecContext(ctx, sqlStr, size, newHeadID, repoID)\n\t\tif err != nil {\n\t\t\ttrans.Rollback()\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvar exist int\n\tsqlStr = \"SELECT 1 FROM RepoFileCount WHERE repo_id=?\"\n\trow = trans.QueryRowContext(ctx, sqlStr, repoID)\n\tif err := row.Scan(&exist); err != nil {\n\t\tif err != sql.ErrNoRows {\n\t\t\ttrans.Rollback()\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif exist != 0 {\n\t\tsqlStr := \"UPDATE RepoFileCount SET file_count=? 
WHERE repo_id=?\"\n\t\t_, err = trans.ExecContext(ctx, sqlStr, fileCount, repoID)\n\t\tif err != nil {\n\t\t\ttrans.Rollback()\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tsqlStr := \"INSERT INTO RepoFileCount (repo_id,file_count) VALUES (?,?)\"\n\t\t_, err = trans.ExecContext(ctx, sqlStr, repoID, fileCount)\n\t\tif err != nil {\n\t\t\ttrans.Rollback()\n\t\t\treturn err\n\t\t}\n\t}\n\n\ttrans.Commit()\n\n\treturn nil\n}\n\ntype RepoSizeChangeTask struct {\n\tRepoID string `json:\"repo_id\"`\n}\n\nfunc notifyRepoSizeChange(repoID string) error {\n\tif !option.HasRedisOptions {\n\t\treturn nil\n\t}\n\n\ttask := &RepoSizeChangeTask{RepoID: repoID}\n\n\tdata, err := json.Marshal(task)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to encode repo size change task: %w\", err)\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\tdefer cancel()\n\n\terr = redisClient.LPush(ctx, RepoSizeList, data).Err()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to push message to redis list %s: %w\", RepoSizeList, err)\n\t}\n\n\treturn nil\n}\n\n// RepoInfo contains repo information.\ntype RepoInfo struct {\n\tHeadID    string\n\tSize      int64\n\tFileCount int64\n}\n\nfunc getOldRepoInfo(repoID string) (*RepoInfo, error) {\n\tsqlStr := \"select s.head_id,s.size,f.file_count FROM RepoSize s LEFT JOIN RepoFileCount f ON \" +\n\t\t\"s.repo_id=f.repo_id WHERE s.repo_id=?\"\n\n\trepoInfo := new(RepoInfo)\n\tctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)\n\tdefer cancel()\n\trow := seafileDB.QueryRowContext(ctx, sqlStr, repoID)\n\tif err := row.Scan(&repoInfo.HeadID, &repoInfo.Size, &repoInfo.FileCount); err != nil {\n\t\tif err != sql.ErrNoRows {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn nil, nil\n\t}\n\n\treturn repoInfo, nil\n}\n"
  },
  {
    "path": "fileserver/sync_api.go",
    "content": "package main\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"database/sql\"\n\t\"encoding/binary\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"html\"\n\t\"io\"\n\t\"net\"\n\t\"net/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/gorilla/mux\"\n\t\"github.com/haiwen/seafile-server/fileserver/blockmgr\"\n\t\"github.com/haiwen/seafile-server/fileserver/commitmgr\"\n\t\"github.com/haiwen/seafile-server/fileserver/diff\"\n\t\"github.com/haiwen/seafile-server/fileserver/fsmgr\"\n\t\"github.com/haiwen/seafile-server/fileserver/option\"\n\t\"github.com/haiwen/seafile-server/fileserver/repomgr\"\n\t\"github.com/haiwen/seafile-server/fileserver/share\"\n\t\"github.com/haiwen/seafile-server/fileserver/utils\"\n\t\"github.com/haiwen/seafile-server/fileserver/workerpool\"\n\tlog \"github.com/sirupsen/logrus\"\n)\n\ntype checkExistType int32\n\nconst (\n\tcheckFSExist    checkExistType = 0\n\tcheckBlockExist checkExistType = 1\n)\n\nconst (\n\tseafileServerChannelEvent  = \"seaf_server.event\"\n\tseafileServerChannelStats  = \"seaf_server.stats\"\n\temptySHA1                  = \"0000000000000000000000000000000000000000\"\n\ttokenExpireTime            = 7200\n\tpermExpireTime             = 7200\n\tvirtualRepoExpireTime      = 7200\n\tsyncAPICleaningIntervalSec = 300\n\tmaxObjectPackSize          = 1 << 20 // 1MB\n\tfsIdWorkers                = 10\n)\n\nvar (\n\ttokenCache           sync.Map\n\tpermCache            sync.Map\n\tvirtualRepoInfoCache sync.Map\n\tcalFsIdPool          *workerpool.WorkPool\n)\n\ntype tokenInfo struct {\n\trepoID     string\n\temail      string\n\texpireTime int64\n}\n\ntype permInfo struct {\n\tperm       string\n\texpireTime int64\n}\n\ntype virtualRepoInfo struct {\n\tstoreID    string\n\texpireTime int64\n}\n\ntype repoEventData struct {\n\teType      string\n\tuser       string\n\tip         string\n\trepoID     string\n\tpath       string\n\tclientName string\n}\n\ntype statsEventData struct 
{\n\teType  string\n\tuser   string\n\trepoID string\n\tbytes  uint64\n}\n\nfunc syncAPIInit() {\n\tticker := time.NewTicker(time.Second * syncAPICleaningIntervalSec)\n\tgo RecoverWrapper(func() {\n\t\tfor range ticker.C {\n\t\t\tremoveSyncAPIExpireCache()\n\t\t}\n\t})\n\n\tcalFsIdPool = workerpool.CreateWorkerPool(getFsId, fsIdWorkers)\n}\n\ntype calResult struct {\n\tuser string\n\terr  *appError\n}\n\nfunc getFsId(args ...interface{}) error {\n\tif len(args) < 3 {\n\t\treturn nil\n\t}\n\n\tresChan := args[0].(chan *calResult)\n\trsp := args[1].(http.ResponseWriter)\n\tr := args[2].(*http.Request)\n\n\tqueries := r.URL.Query()\n\n\tserverHead := queries.Get(\"server-head\")\n\tif !utils.IsObjectIDValid(serverHead) {\n\t\tmsg := \"Invalid server-head parameter.\"\n\t\tappErr := &appError{nil, msg, http.StatusBadRequest}\n\t\tresChan <- &calResult{\"\", appErr}\n\t\treturn nil\n\t}\n\n\tclientHead := queries.Get(\"client-head\")\n\tif clientHead != \"\" && !utils.IsObjectIDValid(clientHead) {\n\t\tmsg := \"Invalid client-head parameter.\"\n\t\tappErr := &appError{nil, msg, http.StatusBadRequest}\n\t\tresChan <- &calResult{\"\", appErr}\n\t\treturn nil\n\t}\n\n\tdirOnlyArg := queries.Get(\"dir-only\")\n\tvar dirOnly bool\n\tif dirOnlyArg != \"\" {\n\t\tdirOnly = true\n\t}\n\n\tvars := mux.Vars(r)\n\trepoID := vars[\"repoid\"]\n\tuser, appErr := validateToken(r, repoID, false)\n\tif appErr != nil {\n\t\tresChan <- &calResult{user, appErr}\n\t\treturn nil\n\t}\n\tappErr = checkPermission(repoID, user, \"download\", false)\n\tif appErr != nil {\n\t\tresChan <- &calResult{user, appErr}\n\t\treturn nil\n\t}\n\trepo := repomgr.Get(repoID)\n\tif repo == nil {\n\t\terr := fmt.Errorf(\"Failed to find repo %.8s\", repoID)\n\t\tappErr := &appError{err, \"\", http.StatusInternalServerError}\n\t\tresChan <- &calResult{user, appErr}\n\t\treturn nil\n\t}\n\tret, err := calculateSendObjectList(r.Context(), repo, serverHead, clientHead, dirOnly)\n\tif err != nil {\n\t\tif 
!errors.Is(err, context.Canceled) {\n\t\t\terr := fmt.Errorf(\"Failed to get fs id list: %w\", err)\n\t\t\tappErr := &appError{err, \"\", http.StatusInternalServerError}\n\t\t\tresChan <- &calResult{user, appErr}\n\t\t\treturn nil\n\t\t}\n\t\tappErr := &appError{nil, \"\", http.StatusInternalServerError}\n\t\tresChan <- &calResult{user, appErr}\n\t\treturn nil\n\t}\n\n\tvar objList []byte\n\tif ret != nil {\n\t\tobjList, err = json.Marshal(ret)\n\t\tif err != nil {\n\t\t\tappErr := &appError{err, \"\", http.StatusInternalServerError}\n\t\t\tresChan <- &calResult{user, appErr}\n\t\t\treturn nil\n\t\t}\n\t} else {\n\t\t// when get obj list is nil, return []\n\t\tobjList = []byte{'[', ']'}\n\t}\n\n\trsp.Header().Set(\"Content-Length\", strconv.Itoa(len(objList)))\n\trsp.WriteHeader(http.StatusOK)\n\trsp.Write(objList)\n\n\tresChan <- &calResult{user, nil}\n\n\treturn nil\n}\n\nfunc permissionCheckCB(rsp http.ResponseWriter, r *http.Request) *appError {\n\tqueries := r.URL.Query()\n\n\top := queries.Get(\"op\")\n\tif op != \"download\" && op != \"upload\" {\n\t\tmsg := \"op is invalid\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\n\tclientID := queries.Get(\"client_id\")\n\tif clientID != \"\" && len(clientID) != 40 {\n\t\tmsg := \"client_id is invalid\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\n\tclientVer := queries.Get(\"client_ver\")\n\tif clientVer != \"\" {\n\t\tstatus := validateClientVer(clientVer)\n\t\tif status != http.StatusOK {\n\t\t\tmsg := \"client_ver is invalid\"\n\t\t\treturn &appError{nil, msg, status}\n\t\t}\n\t}\n\n\tclientName := queries.Get(\"client_name\")\n\tif clientName != \"\" {\n\t\tclientName = html.UnescapeString(clientName)\n\t}\n\n\tvars := mux.Vars(r)\n\trepoID := vars[\"repoid\"]\n\trepo := repomgr.GetEx(repoID)\n\tif repo == nil {\n\t\tmsg := \"repo was deleted\"\n\t\treturn &appError{nil, msg, seafHTTPResRepoDeleted}\n\t}\n\n\tif repo.IsCorrupted {\n\t\tmsg := \"repo was corrupted\"\n\t\treturn 
&appError{nil, msg, seafHTTPResRepoCorrupted}\n\t}\n\n\tuser, err := validateToken(r, repoID, true)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = checkPermission(repoID, user, op, true)\n\tif err != nil {\n\t\treturn err\n\t}\n\tip := getClientIPAddr(r)\n\tif ip == \"\" {\n\t\ttoken := r.Header.Get(\"Seafile-Repo-Token\")\n\t\terr := fmt.Errorf(\"%s failed to get client ip\", token)\n\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t}\n\n\tif op == \"download\" {\n\t\tonRepoOper(\"repo-download-sync\", repoID, user, ip, clientName)\n\t}\n\tif clientID != \"\" && clientName != \"\" {\n\t\ttoken := r.Header.Get(\"Seafile-Repo-Token\")\n\t\texists, err := repomgr.TokenPeerInfoExists(token)\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"Failed to check whether token %s peer info exist: %v\", token, err)\n\t\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t\t}\n\t\tif !exists {\n\t\t\tif err := repomgr.AddTokenPeerInfo(token, clientID, ip, clientName, clientVer, int64(time.Now().Unix())); err != nil {\n\t\t\t\terr := fmt.Errorf(\"Failed to add token peer info: %v\", err)\n\t\t\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t\t\t}\n\t\t} else {\n\t\t\tif err := repomgr.UpdateTokenPeerInfo(token, clientID, clientVer, int64(time.Now().Unix())); err != nil {\n\t\t\t\terr := fmt.Errorf(\"Failed to update token peer info: %v\", err)\n\t\t\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\nfunc getBlockMapCB(rsp http.ResponseWriter, r *http.Request) *appError {\n\tvars := mux.Vars(r)\n\trepoID := vars[\"repoid\"]\n\tfileID := vars[\"id\"]\n\n\tuser, appErr := validateToken(r, repoID, false)\n\tif appErr != nil {\n\t\treturn appErr\n\t}\n\tappErr = checkPermission(repoID, user, \"download\", false)\n\tif appErr != nil {\n\t\treturn appErr\n\t}\n\n\tstoreID, err := getRepoStoreID(repoID)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"Failed to get repo store id by repo id %s: 
%v\", repoID, err)\n\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t}\n\n\tseafile, err := fsmgr.GetSeafile(storeID, fileID)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"Failed to get seafile object by file id %s: %v\", fileID, err)\n\t\treturn &appError{nil, msg, http.StatusNotFound}\n\t}\n\n\tvar blockSizes []int64\n\tfor _, blockID := range seafile.BlkIDs {\n\t\tblockSize, err := blockmgr.Stat(storeID, blockID)\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"Failed to find block %s/%s\", storeID, blockID)\n\t\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t\t}\n\t\tblockSizes = append(blockSizes, blockSize)\n\t}\n\n\tvar data []byte\n\tif blockSizes != nil {\n\t\tdata, err = json.Marshal(blockSizes)\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"Failed to marshal json: %v\", err)\n\t\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t\t}\n\t} else {\n\t\tdata = []byte{'[', ']'}\n\t}\n\n\trsp.Header().Set(\"Content-Length\", strconv.Itoa(len(data)))\n\trsp.WriteHeader(http.StatusOK)\n\trsp.Write(data)\n\n\treturn nil\n}\n\nfunc getAccessibleRepoListCB(rsp http.ResponseWriter, r *http.Request) *appError {\n\tqueries := r.URL.Query()\n\trepoID := queries.Get(\"repo_id\")\n\n\tif repoID == \"\" || !utils.IsValidUUID(repoID) {\n\t\tmsg := \"Invalid repo id.\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\n\tuser, appErr := validateToken(r, repoID, false)\n\tif appErr != nil {\n\t\treturn appErr\n\t}\n\n\tobtainedRepos := make(map[string]string)\n\n\trepos, err := share.GetReposByOwner(user)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"Failed to get repos by owner %s: %v\", user, err)\n\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t}\n\n\tvar repoObjects []*share.SharedRepo\n\tfor _, repo := range repos {\n\t\tif repo.RepoType != \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tif _, ok := obtainedRepos[repo.ID]; !ok {\n\t\t\tobtainedRepos[repo.ID] = repo.ID\n\t\t}\n\t\trepo.Permission = 
\"rw\"\n\t\trepo.Type = \"repo\"\n\t\trepo.Owner = user\n\t\trepoObjects = append(repoObjects, repo)\n\t}\n\n\trepos, err = share.ListShareRepos(user, \"to_email\")\n\tif err != nil {\n\t\terr := fmt.Errorf(\"Failed to get share repos by user %s: %v\", user, err)\n\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t}\n\tfor _, sRepo := range repos {\n\t\tif _, ok := obtainedRepos[sRepo.ID]; ok {\n\t\t\tcontinue\n\t\t}\n\t\tif sRepo.RepoType != \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tsRepo.Type = \"srepo\"\n\t\tsRepo.Owner = strings.ToLower(sRepo.Owner)\n\t\trepoObjects = append(repoObjects, sRepo)\n\t}\n\n\trepos, err = share.GetGroupReposByUser(user, -1)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"Failed to get group repos by user %s: %v\", user, err)\n\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t}\n\treposTable := filterGroupRepos(repos)\n\n\tfor _, gRepo := range reposTable {\n\t\tif _, ok := obtainedRepos[gRepo.ID]; ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tgRepo.Type = \"grepo\"\n\t\tgRepo.Owner = strings.ToLower(gRepo.Owner)\n\t\trepoObjects = append(repoObjects, gRepo)\n\t}\n\n\trepos, err = share.ListInnerPubRepos()\n\tif err != nil {\n\t\terr := fmt.Errorf(\"Failed to get inner public repos: %v\", err)\n\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t}\n\n\tfor _, sRepo := range repos {\n\t\tif _, ok := obtainedRepos[sRepo.ID]; ok {\n\t\t\tcontinue\n\t\t}\n\t\tif sRepo.RepoType != \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tsRepo.Type = \"grepo\"\n\t\tsRepo.Owner = \"Organization\"\n\t\trepoObjects = append(repoObjects, sRepo)\n\t}\n\n\tvar data []byte\n\tif repoObjects != nil {\n\t\tdata, err = json.Marshal(repoObjects)\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"Failed to marshal json: %v\", err)\n\t\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t\t}\n\t} else {\n\t\tdata = []byte{'[', ']'}\n\t}\n\trsp.Header().Set(\"Content-Length\", 
strconv.Itoa(len(data)))\n\trsp.WriteHeader(http.StatusOK)\n\trsp.Write(data)\n\treturn nil\n}\n\nfunc filterGroupRepos(repos []*share.SharedRepo) map[string]*share.SharedRepo {\n\ttable := make(map[string]*share.SharedRepo)\n\n\tfor _, repo := range repos {\n\t\tif repo.RepoType != \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tif repoPrev, ok := table[repo.ID]; ok {\n\t\t\tif repo.Permission == \"rw\" && repoPrev.Permission == \"r\" {\n\t\t\t\ttable[repo.ID] = repo\n\t\t\t}\n\t\t} else {\n\t\t\ttable[repo.ID] = repo\n\t\t}\n\t}\n\n\treturn table\n}\n\nfunc recvFSCB(rsp http.ResponseWriter, r *http.Request) *appError {\n\tvars := mux.Vars(r)\n\trepoID := vars[\"repoid\"]\n\n\tuser, appErr := validateToken(r, repoID, false)\n\tif appErr != nil {\n\t\treturn appErr\n\t}\n\n\tappErr = checkPermission(repoID, user, \"upload\", false)\n\tif appErr != nil {\n\t\treturn appErr\n\t}\n\n\tstoreID, err := getRepoStoreID(repoID)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"Failed to get repo store id by repo id %s: %v\", repoID, err)\n\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t}\n\tfsBuf, err := io.ReadAll(r.Body)\n\tif err != nil {\n\t\treturn &appError{nil, err.Error(), http.StatusBadRequest}\n\t}\n\n\tfor len(fsBuf) > 44 {\n\t\tobjID := string(fsBuf[:40])\n\t\tif !utils.IsObjectIDValid(objID) {\n\t\t\tmsg := fmt.Sprintf(\"Fs obj id %s is invalid\", objID)\n\t\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t\t}\n\n\t\tvar objSize uint32\n\t\tsizeBuffer := bytes.NewBuffer(fsBuf[40:44])\n\t\tif err := binary.Read(sizeBuffer, binary.BigEndian, &objSize); err != nil {\n\t\t\tmsg := fmt.Sprintf(\"Failed to read fs obj size: %v\", err)\n\t\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t\t}\n\n\t\tif len(fsBuf) < int(44+objSize) {\n\t\t\tmsg := \"Request body size invalid\"\n\t\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t\t}\n\n\t\tobjBuffer := bytes.NewBuffer(fsBuf[44 : 44+objSize])\n\t\tif err := fsmgr.WriteRaw(storeID, objID, objBuffer); 
err != nil {\n\t\t\terr := fmt.Errorf(\"Failed to write fs obj %s:%s : %v\", storeID, objID, err)\n\t\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t\t}\n\t\tfsBuf = fsBuf[44+objSize:]\n\t}\n\tif len(fsBuf) == 0 {\n\t\trsp.WriteHeader(http.StatusOK)\n\t\treturn nil\n\t}\n\n\tmsg := \"Request body size invalid\"\n\treturn &appError{nil, msg, http.StatusBadRequest}\n}\nfunc checkFSCB(rsp http.ResponseWriter, r *http.Request) *appError {\n\treturn postCheckExistCB(rsp, r, checkFSExist)\n}\n\nfunc checkBlockCB(rsp http.ResponseWriter, r *http.Request) *appError {\n\treturn postCheckExistCB(rsp, r, checkBlockExist)\n}\n\nfunc postCheckExistCB(rsp http.ResponseWriter, r *http.Request, existType checkExistType) *appError {\n\tvars := mux.Vars(r)\n\trepoID := vars[\"repoid\"]\n\n\tuser, appErr := validateToken(r, repoID, false)\n\tif appErr != nil {\n\t\treturn appErr\n\t}\n\tappErr = checkPermission(repoID, user, \"download\", false)\n\tif appErr != nil {\n\t\treturn appErr\n\t}\n\n\tstoreID, err := getRepoStoreID(repoID)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"Failed to get repo store id by repo id %s: %v\", repoID, err)\n\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t}\n\n\tvar objIDList []string\n\tif err := json.NewDecoder(r.Body).Decode(&objIDList); err != nil {\n\t\treturn &appError{nil, err.Error(), http.StatusBadRequest}\n\t}\n\n\tvar neededObjs []string\n\tvar ret bool\n\tfor i := 0; i < len(objIDList); i++ {\n\t\tif !utils.IsObjectIDValid(objIDList[i]) {\n\t\t\tcontinue\n\t\t}\n\t\tif existType == checkFSExist {\n\t\t\tret, _ = fsmgr.Exists(storeID, objIDList[i])\n\t\t} else if existType == checkBlockExist {\n\t\t\tret = blockmgr.Exists(storeID, objIDList[i])\n\t\t}\n\t\tif !ret {\n\t\t\tneededObjs = append(neededObjs, objIDList[i])\n\t\t}\n\t}\n\n\tvar data []byte\n\tif neededObjs != nil {\n\t\tdata, err = json.Marshal(neededObjs)\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"Failed to marshal json: %v\", 
err)\n\t\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t\t}\n\t} else {\n\t\tdata = []byte{'[', ']'}\n\t}\n\trsp.Header().Set(\"Content-Length\", strconv.Itoa(len(data)))\n\trsp.WriteHeader(http.StatusOK)\n\trsp.Write(data)\n\n\treturn nil\n}\n\nfunc packFSCB(rsp http.ResponseWriter, r *http.Request) *appError {\n\tvars := mux.Vars(r)\n\trepoID := vars[\"repoid\"]\n\n\tuser, appErr := validateToken(r, repoID, false)\n\tif appErr != nil {\n\t\treturn appErr\n\t}\n\tappErr = checkPermission(repoID, user, \"download\", false)\n\tif appErr != nil {\n\t\treturn appErr\n\t}\n\n\tstoreID, err := getRepoStoreID(repoID)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"Failed to get repo store id by repo id %s: %v\", repoID, err)\n\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t}\n\n\tvar fsIDList []string\n\tif err := json.NewDecoder(r.Body).Decode(&fsIDList); err != nil {\n\t\treturn &appError{nil, err.Error(), http.StatusBadRequest}\n\t}\n\n\tvar totalSize int\n\tvar data bytes.Buffer\n\tfor i := 0; i < len(fsIDList); i++ {\n\t\tif !utils.IsObjectIDValid(fsIDList[i]) {\n\t\t\tmsg := fmt.Sprintf(\"Invalid fs id %s\", fsIDList[i])\n\t\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t\t}\n\t\tdata.WriteString(fsIDList[i])\n\t\tvar tmp bytes.Buffer\n\t\tif err := fsmgr.ReadRaw(storeID, fsIDList[i], &tmp); err != nil {\n\t\t\terr := fmt.Errorf(\"Failed to read fs %s:%s: %v\", storeID, fsIDList[i], err)\n\t\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t\t}\n\t\ttmpLen := make([]byte, 4)\n\t\tbinary.BigEndian.PutUint32(tmpLen, uint32(tmp.Len()))\n\t\tdata.Write(tmpLen)\n\t\tdata.Write(tmp.Bytes())\n\n\t\ttotalSize += tmp.Len()\n\t\tif totalSize >= maxObjectPackSize {\n\t\t\tbreak\n\t\t}\n\t}\n\n\trsp.Header().Set(\"Content-Length\", strconv.Itoa(data.Len()))\n\trsp.WriteHeader(http.StatusOK)\n\trsp.Write(data.Bytes())\n\treturn nil\n}\n\nfunc headCommitsMultiCB(rsp http.ResponseWriter, r *http.Request) *appError 
{\n\tvar repoIDList []string\n\tif err := json.NewDecoder(r.Body).Decode(&repoIDList); err != nil {\n\t\treturn &appError{err, \"\", http.StatusBadRequest}\n\t}\n\tif len(repoIDList) == 0 {\n\t\treturn &appError{nil, \"\", http.StatusBadRequest}\n\t}\n\n\tvar repoIDs strings.Builder\n\tfor i := 0; i < len(repoIDList); i++ {\n\t\tif !utils.IsValidUUID(repoIDList[i]) {\n\t\t\treturn &appError{nil, \"\", http.StatusBadRequest}\n\t\t}\n\t\tif i == 0 {\n\t\t\trepoIDs.WriteString(fmt.Sprintf(\"'%s'\", repoIDList[i]))\n\t\t} else {\n\t\t\trepoIDs.WriteString(fmt.Sprintf(\",'%s'\", repoIDList[i]))\n\t\t}\n\t}\n\n\tsqlStr := fmt.Sprintf(\n\t\t\"SELECT repo_id, commit_id FROM Branch WHERE name='master' AND \"+\n\t\t\t\"repo_id IN (%s) LOCK IN SHARE MODE\",\n\t\trepoIDs.String())\n\n\tctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)\n\tdefer cancel()\n\trows, err := seafileDB.QueryContext(ctx, sqlStr)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"Failed to get commit id: %v\", err)\n\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t}\n\n\tdefer rows.Close()\n\n\tcommitIDMap := make(map[string]string)\n\tvar repoID string\n\tvar commitID string\n\tfor rows.Next() {\n\t\tif err := rows.Scan(&repoID, &commitID); err == nil {\n\t\t\tcommitIDMap[repoID] = commitID\n\t\t}\n\t}\n\n\tif err := rows.Err(); err != nil {\n\t\terr := fmt.Errorf(\"Failed to get commit id: %v\", err)\n\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t}\n\n\tdata, err := json.Marshal(commitIDMap)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"Failed to marshal json: %v\", err)\n\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t}\n\n\trsp.Header().Set(\"Content-Length\", strconv.Itoa(len(data)))\n\trsp.WriteHeader(http.StatusOK)\n\trsp.Write(data)\n\n\treturn nil\n}\n\nfunc getCheckQuotaCB(rsp http.ResponseWriter, r *http.Request) *appError {\n\tvars := mux.Vars(r)\n\trepoID := vars[\"repoid\"]\n\n\tif _, err := validateToken(r, 
repoID, false); err != nil {\n\t\treturn err\n\t}\n\n\tqueries := r.URL.Query()\n\tdelta := queries.Get(\"delta\")\n\tif delta == \"\" {\n\t\tmsg := \"Invalid delta parameter\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\n\tdeltaNum, err := strconv.ParseInt(delta, 10, 64)\n\tif err != nil {\n\t\tmsg := \"Invalid delta parameter\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\n\tret, err := checkQuota(repoID, deltaNum)\n\tif err != nil {\n\t\tmsg := \"Internal error.\\n\"\n\t\terr := fmt.Errorf(\"failed to check quota: %v\", err)\n\t\treturn &appError{err, msg, http.StatusInternalServerError}\n\t}\n\tif ret == 1 {\n\t\tmsg := \"Out of quota.\\n\"\n\t\treturn &appError{nil, msg, seafHTTPResNoQuota}\n\t}\n\n\treturn nil\n}\n\nfunc getJWTTokenCB(rsp http.ResponseWriter, r *http.Request) *appError {\n\tvars := mux.Vars(r)\n\trepoID := vars[\"repoid\"]\n\n\tif !option.EnableNotification {\n\t\treturn &appError{nil, \"\", http.StatusNotFound}\n\t}\n\n\tuser, appErr := validateToken(r, repoID, false)\n\tif appErr != nil {\n\t\treturn appErr\n\t}\n\n\texp := time.Now().Add(time.Hour * 72).Unix()\n\ttokenString, err := utils.GenNotifJWTToken(repoID, user, exp)\n\tif err != nil {\n\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t}\n\n\tdata := fmt.Sprintf(\"{\\\"jwt_token\\\":\\\"%s\\\"}\", tokenString)\n\n\trsp.Write([]byte(data))\n\n\treturn nil\n}\n\nfunc getFsObjIDCB(rsp http.ResponseWriter, r *http.Request) *appError {\n\trecvChan := make(chan *calResult)\n\n\tcalFsIdPool.AddTask(recvChan, rsp, r)\n\tresult := <-recvChan\n\treturn result.err\n}\n\nfunc headCommitOperCB(rsp http.ResponseWriter, r *http.Request) *appError {\n\tif r.Method == http.MethodGet {\n\t\treturn getHeadCommit(rsp, r)\n\t} else if r.Method == http.MethodPut {\n\t\treturn putUpdateBranchCB(rsp, r)\n\t}\n\treturn &appError{nil, \"\", http.StatusBadRequest}\n}\n\nfunc commitOperCB(rsp http.ResponseWriter, r *http.Request) *appError {\n\tif r.Method == 
http.MethodGet {\n\t\treturn getCommitInfo(rsp, r)\n\t} else if r.Method == http.MethodPut {\n\t\treturn putCommitCB(rsp, r)\n\t}\n\treturn &appError{nil, \"\", http.StatusBadRequest}\n}\n\nfunc blockOperCB(rsp http.ResponseWriter, r *http.Request) *appError {\n\tif r.Method == http.MethodGet {\n\t\treturn getBlockInfo(rsp, r)\n\t} else if r.Method == http.MethodPut {\n\t\treturn putSendBlockCB(rsp, r)\n\t}\n\treturn &appError{nil, \"\", http.StatusBadRequest}\n}\n\nfunc putSendBlockCB(rsp http.ResponseWriter, r *http.Request) *appError {\n\tvars := mux.Vars(r)\n\trepoID := vars[\"repoid\"]\n\tblockID := vars[\"id\"]\n\n\tuser, appErr := validateToken(r, repoID, false)\n\tif appErr != nil {\n\t\treturn appErr\n\t}\n\n\tappErr = checkPermission(repoID, user, \"upload\", false)\n\tif appErr != nil {\n\t\treturn appErr\n\t}\n\n\tstoreID, err := getRepoStoreID(repoID)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"Failed to get repo store id by repo id %s: %v\", repoID, err)\n\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t}\n\n\tif err := blockmgr.Write(storeID, blockID, r.Body); err != nil {\n\t\terr := fmt.Errorf(\"Failed to write block %.8s:%s: %v\", storeID, blockID, err)\n\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t}\n\n\tsendStatisticMsg(storeID, user, \"sync-file-upload\", uint64(r.ContentLength))\n\n\treturn nil\n}\n\nfunc getBlockInfo(rsp http.ResponseWriter, r *http.Request) *appError {\n\tvars := mux.Vars(r)\n\trepoID := vars[\"repoid\"]\n\tblockID := vars[\"id\"]\n\n\tuser, appErr := validateToken(r, repoID, false)\n\tif appErr != nil {\n\t\treturn appErr\n\t}\n\n\tappErr = checkPermission(repoID, user, \"download\", false)\n\tif appErr != nil {\n\t\treturn appErr\n\t}\n\n\tstoreID, err := getRepoStoreID(repoID)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"Failed to get repo store id by repo id %s: %v\", repoID, err)\n\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t}\n\n\tblockSize, err := 
blockmgr.Stat(storeID, blockID)\n\tif err != nil {\n\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t}\n\tif blockSize <= 0 {\n\t\terr := fmt.Errorf(\"block %.8s:%s size invalid\", storeID, blockID)\n\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t}\n\n\tblockLen := fmt.Sprintf(\"%d\", blockSize)\n\trsp.Header().Set(\"Content-Length\", blockLen)\n\tif err := blockmgr.Read(storeID, blockID, rsp); err != nil {\n\t\tif !isNetworkErr(err) {\n\t\t\tlog.Errorf(\"failed to read block %s: %v\", blockID, err)\n\t\t}\n\t\treturn nil\n\t}\n\n\tsendStatisticMsg(storeID, user, \"sync-file-download\", uint64(blockSize))\n\treturn nil\n}\n\nfunc getRepoStoreID(repoID string) (string, error) {\n\tvar storeID string\n\n\tif value, ok := virtualRepoInfoCache.Load(repoID); ok {\n\t\tif info, ok := value.(*virtualRepoInfo); ok {\n\t\t\tif info.storeID != \"\" {\n\t\t\t\tstoreID = info.storeID\n\t\t\t} else {\n\t\t\t\tstoreID = repoID\n\t\t\t}\n\t\t\tinfo.expireTime = time.Now().Unix() + virtualRepoExpireTime\n\t\t}\n\t}\n\tif storeID != \"\" {\n\t\treturn storeID, nil\n\t}\n\n\tvar vInfo virtualRepoInfo\n\tvar rID, originRepoID sql.NullString\n\tsqlStr := \"SELECT repo_id, origin_repo FROM VirtualRepo where repo_id = ?\"\n\tctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)\n\tdefer cancel()\n\trow := seafileDB.QueryRowContext(ctx, sqlStr, repoID)\n\tif err := row.Scan(&rID, &originRepoID); err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\tvInfo.storeID = repoID\n\t\t\tvInfo.expireTime = time.Now().Unix() + virtualRepoExpireTime\n\t\t\tvirtualRepoInfoCache.Store(repoID, &vInfo)\n\t\t\treturn repoID, nil\n\t\t}\n\t\treturn \"\", err\n\t}\n\n\tif !rID.Valid || !originRepoID.Valid {\n\t\treturn \"\", nil\n\t}\n\n\tvInfo.storeID = originRepoID.String\n\tvInfo.expireTime = time.Now().Unix() + virtualRepoExpireTime\n\tvirtualRepoInfoCache.Store(repoID, &vInfo)\n\treturn originRepoID.String, nil\n}\n\nfunc 
sendStatisticMsg(repoID, user, operation string, bytes uint64) {\n\trData := &statsEventData{operation, user, repoID, bytes}\n\n\tpublishStatsEvent(rData)\n}\n\nfunc publishStatsEvent(rData *statsEventData) {\n\tdata := make(map[string]interface{})\n\tdata[\"msg_type\"] = rData.eType\n\tdata[\"user_name\"] = rData.user\n\tdata[\"repo_id\"] = rData.repoID\n\tdata[\"bytes\"] = rData.bytes\n\tjsonData, err := json.Marshal(data)\n\tif err != nil {\n\t\tlog.Warnf(\"Failed to publish event: %v\", err)\n\t\treturn\n\t}\n\tif _, err := rpcclient.Call(\"publish_event\", seafileServerChannelStats, string(jsonData)); err != nil {\n\t\tlog.Warnf(\"Failed to publish event: %v\", err)\n\t}\n}\n\nfunc saveLastGCID(repoID, token string) error {\n\trepo := repomgr.Get(repoID)\n\tif repo == nil {\n\t\treturn fmt.Errorf(\"failed to get repo: %s\", repoID)\n\t}\n\tgcID, err := repomgr.GetCurrentGCID(repo.StoreID)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn repomgr.SetLastGCID(repoID, token, gcID)\n}\n\nfunc putCommitCB(rsp http.ResponseWriter, r *http.Request) *appError {\n\tvars := mux.Vars(r)\n\trepoID := vars[\"repoid\"]\n\tcommitID := vars[\"id\"]\n\tuser, appErr := validateToken(r, repoID, false)\n\tif appErr != nil {\n\t\treturn appErr\n\t}\n\tappErr = checkPermission(repoID, user, \"upload\", true)\n\tif appErr != nil {\n\t\treturn appErr\n\t}\n\n\tdata, err := io.ReadAll(r.Body)\n\tif err != nil {\n\t\treturn &appError{nil, err.Error(), http.StatusBadRequest}\n\t}\n\n\tcommit := new(commitmgr.Commit)\n\tif err := commit.FromData(data); err != nil {\n\t\treturn &appError{nil, err.Error(), http.StatusBadRequest}\n\t}\n\n\tif commit.RepoID != repoID {\n\t\tmsg := \"The repo id in commit does not match current repo id\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\n\tif err := commitmgr.Save(commit); err != nil {\n\t\terr := fmt.Errorf(\"Failed to add commit %s: %v\", commitID, err)\n\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t} else 
{\n\t\ttoken := r.Header.Get(\"Seafile-Repo-Token\")\n\t\tif token == \"\" {\n\t\t\ttoken = utils.GetAuthorizationToken(r.Header)\n\t\t}\n\t\tif err := saveLastGCID(repoID, token); err != nil {\n\t\t\terr := fmt.Errorf(\"Failed to save gc id: %v\", err)\n\t\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc getCommitInfo(rsp http.ResponseWriter, r *http.Request) *appError {\n\tvars := mux.Vars(r)\n\trepoID := vars[\"repoid\"]\n\tcommitID := vars[\"id\"]\n\tuser, appErr := validateToken(r, repoID, false)\n\tif appErr != nil {\n\t\treturn appErr\n\t}\n\tappErr = checkPermission(repoID, user, \"download\", false)\n\tif appErr != nil {\n\t\treturn appErr\n\t}\n\tif exists, _ := commitmgr.Exists(repoID, commitID); !exists {\n\t\treturn &appError{nil, \"\", http.StatusNotFound}\n\t}\n\n\tvar data bytes.Buffer\n\terr := commitmgr.ReadRaw(repoID, commitID, &data)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"Failed to read commit %s:%s: %v\", repoID, commitID, err)\n\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t}\n\n\tdataLen := strconv.Itoa(data.Len())\n\trsp.Header().Set(\"Content-Length\", dataLen)\n\trsp.WriteHeader(http.StatusOK)\n\trsp.Write(data.Bytes())\n\n\treturn nil\n}\n\nfunc putUpdateBranchCB(rsp http.ResponseWriter, r *http.Request) *appError {\n\tqueries := r.URL.Query()\n\tnewCommitID := queries.Get(\"head\")\n\tif newCommitID == \"\" || !utils.IsObjectIDValid(newCommitID) {\n\t\tmsg := fmt.Sprintf(\"commit id %s is invalid\", newCommitID)\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\n\tvars := mux.Vars(r)\n\trepoID := vars[\"repoid\"]\n\tuser, appErr := validateToken(r, repoID, false)\n\tif appErr != nil {\n\t\treturn appErr\n\t}\n\n\tappErr = checkPermission(repoID, user, \"upload\", false)\n\tif appErr != nil && appErr.Code == http.StatusForbidden {\n\t\treturn appErr\n\t}\n\n\trepo := repomgr.Get(repoID)\n\tif repo == nil {\n\t\terr := fmt.Errorf(\"Repo %s is missing 
or corrupted\", repoID)\n\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t}\n\n\tnewCommit, err := commitmgr.Load(repoID, newCommitID)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"Failed to get commit %s for repo %s\", newCommitID, repoID)\n\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t}\n\n\tbase, err := commitmgr.Load(repoID, newCommit.ParentID.String)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"Failed to get commit %s for repo %s\", newCommit.ParentID.String, repoID)\n\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t}\n\n\tif includeInvalidPath(base, newCommit) {\n\t\tmsg := \"Dir or file name is ..\"\n\t\treturn &appError{nil, msg, http.StatusBadRequest}\n\t}\n\n\tret, err := checkQuota(repoID, 0)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"Failed to check quota: %v\", err)\n\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t}\n\tif ret == 1 {\n\t\tmsg := \"Out of quota.\\n\"\n\t\treturn &appError{nil, msg, seafHTTPResNoQuota}\n\t}\n\n\tif option.VerifyClientBlocks {\n\t\tif body, err := checkBlocks(r.Context(), repo, base, newCommit); err != nil {\n\t\t\treturn &appError{nil, body, seafHTTPResBlockMissing}\n\t\t}\n\t}\n\n\ttoken := r.Header.Get(\"Seafile-Repo-Token\")\n\tif token == \"\" {\n\t\ttoken = utils.GetAuthorizationToken(r.Header)\n\t}\n\tif err := fastForwardOrMerge(user, token, repo, base, newCommit); err != nil {\n\t\tif errors.Is(err, ErrGCConflict) {\n\t\t\treturn &appError{nil, \"GC Conflict.\\n\", http.StatusConflict}\n\t\t} else {\n\t\t\terr := fmt.Errorf(\"Fast forward merge for repo %s is failed: %v\", repoID, err)\n\t\t\treturn &appError{err, \"\", http.StatusInternalServerError}\n\t\t}\n\t}\n\n\tgo mergeVirtualRepoPool.AddTask(repoID, \"\")\n\n\tgo updateSizePool.AddTask(repoID)\n\n\trsp.WriteHeader(http.StatusOK)\n\treturn nil\n}\n\ntype checkBlockAux struct {\n\tstoreID  string\n\tversion  int\n\tfileList []string\n}\n\nfunc checkBlocks(ctx context.Context, repo 
*repomgr.Repo, base, remote *commitmgr.Commit) (string, error) {\n\taux := new(checkBlockAux)\n\taux.storeID = repo.StoreID\n\taux.version = repo.Version\n\topt := &diff.DiffOptions{\n\t\tFileCB: checkFileBlocks,\n\t\tDirCB:  checkDirCB,\n\t\tCtx:    ctx,\n\t\tRepoID: repo.StoreID}\n\topt.Data = aux\n\n\ttrees := []string{base.RootID, remote.RootID}\n\tif err := diff.DiffTrees(trees, opt); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif len(aux.fileList) == 0 {\n\t\treturn \"\", nil\n\t}\n\n\tbody, _ := json.Marshal(aux.fileList)\n\n\treturn string(body), fmt.Errorf(\"block is missing\")\n}\n\nfunc checkFileBlocks(ctx context.Context, baseDir string, files []*fsmgr.SeafDirent, data interface{}) error {\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn context.Canceled\n\tdefault:\n\t}\n\n\tfile1 := files[0]\n\tfile2 := files[1]\n\n\taux, ok := data.(*checkBlockAux)\n\tif !ok {\n\t\terr := fmt.Errorf(\"failed to assert results\")\n\t\treturn err\n\t}\n\n\tif file2 == nil || file2.ID == emptySHA1 || (file1 != nil && file1.ID == file2.ID) {\n\t\treturn nil\n\t}\n\n\tfile, err := fsmgr.GetSeafile(aux.storeID, file2.ID)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, blkID := range file.BlkIDs {\n\t\tif !blockmgr.Exists(aux.storeID, blkID) {\n\t\t\taux.fileList = append(aux.fileList, file2.Name)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc checkDirCB(ctx context.Context, baseDir string, dirs []*fsmgr.SeafDirent, data interface{}, recurse *bool) error {\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn context.Canceled\n\tdefault:\n\t}\n\n\tdir1 := dirs[0]\n\tdir2 := dirs[1]\n\n\tif dir1 == nil {\n\t\t// if dir2 is empty, stop diff.\n\t\tif dir2.ID == diff.EmptySha1 {\n\t\t\t*recurse = false\n\t\t} else {\n\t\t\t*recurse = true\n\t\t}\n\t\treturn nil\n\t}\n\n\t// if dir2 is not exist, stop diff.\n\tif dir2 == nil {\n\t\t*recurse = false\n\t\treturn nil\n\t}\n\n\t// if dir1 and dir2 are the same or dir2 is empty, stop diff.\n\tif dir1.ID == dir2.ID || dir2.ID == 
diff.EmptySha1 {\n\t\t*recurse = false\n\t\treturn nil\n\t}\n\n\treturn nil\n}\n\nfunc includeInvalidPath(baseCommit, newCommit *commitmgr.Commit) bool {\n\tvar results []*diff.DiffEntry\n\tif err := diff.DiffCommits(baseCommit, newCommit, &results, true); err != nil {\n\t\tlog.Infof(\"Failed to diff commits: %v\", err)\n\t\treturn false\n\t}\n\n\tfor _, entry := range results {\n\t\tif entry.NewName != \"\" {\n\t\t\tif shouldIgnore(entry.NewName) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t} else {\n\t\t\tif shouldIgnore(entry.Name) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}\n\nfunc getHeadCommit(rsp http.ResponseWriter, r *http.Request) *appError {\n\tvars := mux.Vars(r)\n\trepoID := vars[\"repoid\"]\n\tsqlStr := \"SELECT EXISTS(SELECT 1 FROM Repo WHERE repo_id=?)\"\n\tvar exists bool\n\tctx, cancel := context.WithTimeout(context.Background(), option.DBOpTimeout)\n\tdefer cancel()\n\trow := seafileDB.QueryRowContext(ctx, sqlStr, repoID)\n\tif err := row.Scan(&exists); err != nil {\n\t\tif err != sql.ErrNoRows {\n\t\t\tlog.Errorf(\"DB error when check repo %s existence: %v\", repoID, err)\n\t\t\tmsg := `{\"is_corrupted\": 1}`\n\t\t\trsp.WriteHeader(http.StatusOK)\n\t\t\trsp.Write([]byte(msg))\n\t\t\treturn nil\n\t\t}\n\t}\n\tif !exists {\n\t\treturn &appError{nil, \"\", seafHTTPResRepoDeleted}\n\t}\n\n\tif _, err := validateToken(r, repoID, false); err != nil {\n\t\treturn err\n\t}\n\n\tvar commitID string\n\tsqlStr = \"SELECT commit_id FROM Branch WHERE name='master' AND repo_id=?\"\n\trow = seafileDB.QueryRowContext(ctx, sqlStr, repoID)\n\n\tif err := row.Scan(&commitID); err != nil {\n\t\tif err != sql.ErrNoRows {\n\t\t\tlog.Errorf(\"DB error when get branch master: %v\", err)\n\t\t\tmsg := `{\"is_corrupted\": 1}`\n\t\t\trsp.WriteHeader(http.StatusOK)\n\t\t\trsp.Write([]byte(msg))\n\t\t\treturn nil\n\t\t}\n\t}\n\tif commitID == \"\" {\n\t\treturn &appError{nil, \"\", http.StatusBadRequest}\n\t}\n\n\tmsg := fmt.Sprintf(\"{\\\"is_corrupted\\\": 0, 
\\\"head_commit_id\\\": \\\"%s\\\"}\", commitID)\n\trsp.WriteHeader(http.StatusOK)\n\trsp.Write([]byte(msg))\n\treturn nil\n}\n\nfunc checkPermission(repoID, user, op string, skipCache bool) *appError {\n\tvar info *permInfo\n\tif !skipCache {\n\t\tif value, ok := permCache.Load(fmt.Sprintf(\"%s:%s:%s\", repoID, user, op)); ok {\n\t\t\tinfo = value.(*permInfo)\n\t\t}\n\t}\n\tif info != nil {\n\t\treturn nil\n\t}\n\n\tpermCache.Delete(fmt.Sprintf(\"%s:%s:%s\", repoID, user, op))\n\n\tif op == \"upload\" {\n\t\tstatus, err := repomgr.GetRepoStatus(repoID)\n\t\tif err != nil {\n\t\t\tmsg := fmt.Sprintf(\"Failed to get repo status by repo id %s: %v\", repoID, err)\n\t\t\treturn &appError{nil, msg, http.StatusForbidden}\n\t\t}\n\t\tif status != repomgr.RepoStatusNormal && status != -1 {\n\t\t\treturn &appError{nil, \"\", http.StatusForbidden}\n\t\t}\n\t}\n\n\tperm := share.CheckPerm(repoID, user)\n\tif perm != \"\" {\n\t\tif perm == \"r\" && op == \"upload\" {\n\t\t\treturn &appError{nil, \"\", http.StatusForbidden}\n\t\t}\n\t\tinfo = new(permInfo)\n\t\tinfo.perm = perm\n\t\tinfo.expireTime = time.Now().Unix() + permExpireTime\n\t\tpermCache.Store(fmt.Sprintf(\"%s:%s:%s\", repoID, user, op), info)\n\t\treturn nil\n\t}\n\n\treturn &appError{nil, \"\", http.StatusForbidden}\n}\n\nfunc validateToken(r *http.Request, repoID string, skipCache bool) (string, *appError) {\n\ttoken := r.Header.Get(\"Seafile-Repo-Token\")\n\tif token == \"\" {\n\t\ttoken = utils.GetAuthorizationToken(r.Header)\n\t\tif token == \"\" {\n\t\t\tmsg := \"token is null\"\n\t\t\treturn \"\", &appError{nil, msg, http.StatusBadRequest}\n\t\t}\n\t}\n\n\tif !skipCache {\n\t\tif value, ok := tokenCache.Load(token); ok {\n\t\t\tif info, ok := value.(*tokenInfo); ok {\n\t\t\t\tif info.repoID != repoID {\n\t\t\t\t\tmsg := \"Invalid token\"\n\t\t\t\t\treturn \"\", &appError{nil, msg, http.StatusForbidden}\n\t\t\t\t}\n\t\t\t\treturn info.email, nil\n\t\t\t}\n\t\t}\n\t}\n\n\temail, err := 
repomgr.GetEmailByToken(repoID, token)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to get email by token %s: %v\", token, err)\n\t\ttokenCache.Delete(token)\n\t\treturn email, &appError{err, \"\", http.StatusInternalServerError}\n\t}\n\tif email == \"\" {\n\t\ttokenCache.Delete(token)\n\t\tmsg := fmt.Sprintf(\"Failed to get email by token %s\", token)\n\t\treturn email, &appError{nil, msg, http.StatusForbidden}\n\t}\n\n\tinfo := new(tokenInfo)\n\tinfo.email = email\n\tinfo.expireTime = time.Now().Unix() + tokenExpireTime\n\tinfo.repoID = repoID\n\ttokenCache.Store(token, info)\n\n\treturn email, nil\n}\n\nfunc validateClientVer(clientVer string) int {\n\tversions := strings.Split(clientVer, \".\")\n\tif len(versions) != 3 {\n\t\treturn http.StatusBadRequest\n\t}\n\tif _, err := strconv.Atoi(versions[0]); err != nil {\n\t\treturn http.StatusBadRequest\n\t}\n\tif _, err := strconv.Atoi(versions[1]); err != nil {\n\t\treturn http.StatusBadRequest\n\t}\n\tif _, err := strconv.Atoi(versions[2]); err != nil {\n\t\treturn http.StatusBadRequest\n\t}\n\n\treturn http.StatusOK\n}\n\nfunc getClientIPAddr(r *http.Request) string {\n\txForwardedFor := r.Header.Get(\"X-Forwarded-For\")\n\taddr := strings.TrimSpace(strings.Split(xForwardedFor, \",\")[0])\n\tip := net.ParseIP(addr)\n\tif ip != nil {\n\t\treturn ip.String()\n\t}\n\n\taddr = strings.TrimSpace(r.Header.Get(\"X-Real-Ip\"))\n\tip = net.ParseIP(addr)\n\tif ip != nil {\n\t\treturn ip.String()\n\t}\n\n\tif addr, _, err := net.SplitHostPort(strings.TrimSpace(r.RemoteAddr)); err == nil {\n\t\tip = net.ParseIP(addr)\n\t\tif ip != nil {\n\t\t\treturn ip.String()\n\t\t}\n\t}\n\n\treturn \"\"\n}\n\nfunc onRepoOper(eType, repoID, user, ip, clientName string) {\n\trData := new(repoEventData)\n\tvInfo, err := repomgr.GetVirtualRepoInfo(repoID)\n\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to get virtual repo info by repo id %s: %v\", repoID, err)\n\t\treturn\n\t}\n\tif vInfo != nil {\n\t\trData.repoID = 
vInfo.OriginRepoID\n\t\trData.path = vInfo.Path\n\t} else {\n\t\trData.repoID = repoID\n\t}\n\trData.eType = eType\n\trData.user = user\n\trData.ip = ip\n\trData.clientName = clientName\n\n\tpublishRepoEvent(rData)\n}\n\nfunc publishRepoEvent(rData *repoEventData) {\n\tif rData.path == \"\" {\n\t\trData.path = \"/\"\n\t}\n\tdata := make(map[string]interface{})\n\tdata[\"msg_type\"] = rData.eType\n\tdata[\"user_name\"] = rData.user\n\tdata[\"ip\"] = rData.ip\n\tdata[\"user_agent\"] = rData.clientName\n\tdata[\"repo_id\"] = rData.repoID\n\tdata[\"file_path\"] = rData.path\n\tjsonData, err := json.Marshal(data)\n\tif err != nil {\n\t\tlog.Warnf(\"Failed to publish event: %v\", err)\n\t\treturn\n\t}\n\tif _, err := rpcclient.Call(\"publish_event\", seafileServerChannelEvent, string(jsonData)); err != nil {\n\t\tlog.Warnf(\"Failed to publish event: %v\", err)\n\t}\n}\n\nfunc publishUpdateEvent(repoID string, commitID string) {\n\tdata := make(map[string]interface{})\n\tdata[\"msg_type\"] = \"repo-update\"\n\tdata[\"repo_id\"] = repoID\n\tdata[\"commit_id\"] = commitID\n\tjsonData, err := json.Marshal(data)\n\tif err != nil {\n\t\tlog.Warnf(\"Failed to publish event: %v\", err)\n\t\treturn\n\t}\n\tif _, err := rpcclient.Call(\"publish_event\", seafileServerChannelEvent, string(jsonData)); err != nil {\n\t\tlog.Warnf(\"Failed to publish event: %v\", err)\n\t}\n}\n\nfunc removeSyncAPIExpireCache() {\n\tdeleteTokens := func(key interface{}, value interface{}) bool {\n\t\tif info, ok := value.(*tokenInfo); ok {\n\t\t\tif info.expireTime <= time.Now().Unix() {\n\t\t\t\ttokenCache.Delete(key)\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\n\tdeletePerms := func(key interface{}, value interface{}) bool {\n\t\tif info, ok := value.(*permInfo); ok {\n\t\t\tif info.expireTime <= time.Now().Unix() {\n\t\t\t\tpermCache.Delete(key)\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\n\tdeleteVirtualRepoInfo := func(key interface{}, value interface{}) bool {\n\t\tif info, ok := value.(*virtualRepoInfo); 
ok {\n\t\t\tif info.expireTime <= time.Now().Unix() {\n\t\t\t\tvirtualRepoInfoCache.Delete(key)\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\n\ttokenCache.Range(deleteTokens)\n\tpermCache.Range(deletePerms)\n\tvirtualRepoInfoCache.Range(deleteVirtualRepoInfo)\n}\n\ntype collectFsInfo struct {\n\tstartTime int64\n\tisTimeout bool\n\tresults   []interface{}\n}\n\nvar ErrTimeout = fmt.Errorf(\"get fs id list timeout\")\n\nfunc calculateSendObjectList(ctx context.Context, repo *repomgr.Repo, serverHead string, clientHead string, dirOnly bool) ([]interface{}, error) {\n\tmasterHead, err := commitmgr.Load(repo.ID, serverHead)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"Failed to load server head commit %s:%s: %v\", repo.ID, serverHead, err)\n\t\treturn nil, err\n\t}\n\tvar remoteHead *commitmgr.Commit\n\tremoteHeadRoot := emptySHA1\n\tif clientHead != \"\" {\n\t\tremoteHead, err = commitmgr.Load(repo.ID, clientHead)\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"Failed to load remote head commit %s:%s: %v\", repo.ID, clientHead, err)\n\t\t\treturn nil, err\n\t\t}\n\t\tremoteHeadRoot = remoteHead.RootID\n\t}\n\n\tinfo := new(collectFsInfo)\n\tinfo.startTime = time.Now().Unix()\n\tif remoteHeadRoot != masterHead.RootID && masterHead.RootID != emptySHA1 {\n\t\tinfo.results = append(info.results, masterHead.RootID)\n\t}\n\n\tvar opt *diff.DiffOptions\n\tif !dirOnly {\n\t\topt = &diff.DiffOptions{\n\t\t\tFileCB: collectFileIDs,\n\t\t\tDirCB:  collectDirIDs,\n\t\t\tCtx:    ctx,\n\t\t\tRepoID: repo.StoreID}\n\t\topt.Data = info\n\t} else {\n\t\topt = &diff.DiffOptions{\n\t\t\tFileCB: collectFileIDsNOp,\n\t\t\tDirCB:  collectDirIDs,\n\t\t\tCtx:    ctx,\n\t\t\tRepoID: repo.StoreID}\n\t\topt.Data = info\n\t}\n\ttrees := []string{masterHead.RootID, remoteHeadRoot}\n\n\tif err := diff.DiffTrees(trees, opt); err != nil {\n\t\tif info.isTimeout {\n\t\t\treturn nil, ErrTimeout\n\t\t}\n\t\treturn nil, err\n\t}\n\treturn info.results, nil\n}\n\nfunc collectFileIDs(ctx context.Context, 
baseDir string, files []*fsmgr.SeafDirent, data interface{}) error {\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn context.Canceled\n\tdefault:\n\t}\n\n\tfile1 := files[0]\n\tfile2 := files[1]\n\tinfo, ok := data.(*collectFsInfo)\n\tif !ok {\n\t\terr := fmt.Errorf(\"failed to assert results\")\n\t\treturn err\n\t}\n\n\tif file1 != nil &&\n\t\t(file2 == nil || file1.ID != file2.ID) &&\n\t\tfile1.ID != emptySHA1 {\n\t\tinfo.results = append(info.results, file1.ID)\n\t}\n\n\treturn nil\n}\n\nfunc collectFileIDsNOp(ctx context.Context, baseDir string, files []*fsmgr.SeafDirent, data interface{}) error {\n\treturn nil\n}\n\nfunc collectDirIDs(ctx context.Context, baseDir string, dirs []*fsmgr.SeafDirent, data interface{}, recurse *bool) error {\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn context.Canceled\n\tdefault:\n\t}\n\n\tinfo, ok := data.(*collectFsInfo)\n\tif !ok {\n\t\terr := fmt.Errorf(\"failed to assert fs info\")\n\t\treturn err\n\t}\n\tdir1 := dirs[0]\n\tdir2 := dirs[1]\n\n\tif dir1 != nil &&\n\t\t(dir2 == nil || dir1.ID != dir2.ID) &&\n\t\tdir1.ID != emptySHA1 {\n\t\tinfo.results = append(info.results, dir1.ID)\n\t}\n\n\tif option.FsIdListRequestTimeout > 0 {\n\t\tnow := time.Now().Unix()\n\t\tif now-info.startTime > option.FsIdListRequestTimeout {\n\t\t\tinfo.isTimeout = true\n\t\t\treturn ErrTimeout\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "fileserver/utils/dup2.go",
    "content": "//go:build !(linux && arm64)\n\npackage utils\n\nimport (\n\t\"syscall\"\n)\n\nfunc Dup(from, to int) error {\n\treturn syscall.Dup2(from, to)\n}\n"
  },
  {
    "path": "fileserver/utils/dup3.go",
    "content": "//go:build linux && arm64\n\npackage utils\n\nimport (\n\t\"syscall\"\n)\n\nfunc Dup(from, to int) error {\n\treturn syscall.Dup3(from, to, 0)\n}\n"
  },
  {
    "path": "fileserver/utils/http.go",
    "content": "package utils\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"strings\"\n\t\"time\"\n)\n\nfunc GetAuthorizationToken(h http.Header) string {\n\tauth := h.Get(\"Authorization\")\n\tsplitResult := strings.Split(auth, \" \")\n\tif len(splitResult) > 1 {\n\t\treturn splitResult[1]\n\t}\n\treturn \"\"\n}\n\nfunc HttpCommon(method, url string, header map[string][]string, reader io.Reader) (int, []byte, error) {\n\theader[\"Content-Type\"] = []string{\"application/json\"}\n\theader[\"User-Agent\"] = []string{\"Seafile Server\"}\n\tctx, cancel := context.WithTimeout(context.Background(), 45*time.Second)\n\tdefer cancel()\n\treq, err := http.NewRequestWithContext(ctx, method, url, reader)\n\tif err != nil {\n\t\treturn http.StatusInternalServerError, nil, err\n\t}\n\treq.Header = header\n\n\trsp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn http.StatusInternalServerError, nil, err\n\t}\n\tdefer rsp.Body.Close()\n\n\tif rsp.StatusCode != http.StatusOK {\n\t\terrMsg := parseErrorMessage(rsp.Body)\n\t\treturn rsp.StatusCode, errMsg, fmt.Errorf(\"bad response %d for %s\", rsp.StatusCode, url)\n\t}\n\n\tbody, err := io.ReadAll(rsp.Body)\n\tif err != nil {\n\t\treturn rsp.StatusCode, nil, err\n\t}\n\n\treturn http.StatusOK, body, nil\n}\n\nfunc parseErrorMessage(r io.Reader) []byte {\n\tbody, err := io.ReadAll(r)\n\tif err != nil {\n\t\treturn nil\n\t}\n\tvar objs map[string]string\n\terr = json.Unmarshal(body, &objs)\n\tif err != nil {\n\t\treturn body\n\t}\n\terrMsg, ok := objs[\"error_msg\"]\n\tif ok {\n\t\treturn []byte(errMsg)\n\t}\n\n\treturn body\n}\n"
  },
  {
    "path": "fileserver/utils/utils.go",
    "content": "package utils\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\tjwt \"github.com/golang-jwt/jwt/v5\"\n\t\"github.com/google/uuid\"\n\t\"github.com/haiwen/seafile-server/fileserver/option\"\n)\n\nfunc IsValidUUID(u string) bool {\n\t_, err := uuid.Parse(u)\n\treturn err == nil\n}\n\nfunc IsObjectIDValid(objID string) bool {\n\tif len(objID) != 40 {\n\t\treturn false\n\t}\n\tfor i := 0; i < len(objID); i++ {\n\t\tc := objID[i]\n\t\tif (c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') {\n\t\t\tcontinue\n\t\t}\n\t\treturn false\n\t}\n\treturn true\n}\n\ntype SeahubClaims struct {\n\tExp        int64 `json:\"exp\"`\n\tIsInternal bool  `json:\"is_internal\"`\n\tjwt.RegisteredClaims\n}\n\nfunc (*SeahubClaims) Valid() error {\n\treturn nil\n}\n\nfunc GenSeahubJWTToken() (string, error) {\n\tclaims := new(SeahubClaims)\n\tclaims.Exp = time.Now().Add(time.Second * 300).Unix()\n\tclaims.IsInternal = true\n\n\ttoken := jwt.NewWithClaims(jwt.GetSigningMethod(\"HS256\"), claims)\n\ttokenString, err := token.SignedString([]byte(option.JWTPrivateKey))\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to gen seahub jwt token: %w\", err)\n\t\treturn \"\", err\n\t}\n\n\treturn tokenString, nil\n}\n\ntype MyClaims struct {\n\tExp      int64  `json:\"exp\"`\n\tRepoID   string `json:\"repo_id\"`\n\tUserName string `json:\"username\"`\n\tjwt.RegisteredClaims\n}\n\nfunc (*MyClaims) Valid() error {\n\treturn nil\n}\n\nfunc GenNotifJWTToken(repoID, user string, exp int64) (string, error) {\n\tclaims := new(MyClaims)\n\tclaims.Exp = exp\n\tclaims.RepoID = repoID\n\tclaims.UserName = user\n\n\ttoken := jwt.NewWithClaims(jwt.GetSigningMethod(\"HS256\"), claims)\n\ttokenString, err := token.SignedString([]byte(option.JWTPrivateKey))\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to gen jwt token for repo %s: %w\", repoID, err)\n\t\treturn \"\", err\n\t}\n\n\treturn tokenString, nil\n}\n"
  },
  {
    "path": "fileserver/virtual_repo.go",
    "content": "package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"math/rand\"\n\n\t\"github.com/haiwen/seafile-server/fileserver/commitmgr\"\n\t\"github.com/haiwen/seafile-server/fileserver/diff\"\n\t\"github.com/haiwen/seafile-server/fileserver/fsmgr\"\n\t\"github.com/haiwen/seafile-server/fileserver/option\"\n\t\"github.com/haiwen/seafile-server/fileserver/repomgr\"\n\t\"github.com/haiwen/seafile-server/fileserver/workerpool\"\n\tlog \"github.com/sirupsen/logrus\"\n)\n\nconst mergeVirtualRepoWorkerNumber = 5\n\nvar mergeVirtualRepoPool *workerpool.WorkPool\n\nvar runningRepo = make(map[string]struct{})\nvar runningRepoMutex sync.Mutex\n\nfunc virtualRepoInit() {\n\tmergeVirtualRepoPool = workerpool.CreateWorkerPool(mergeVirtualRepo, mergeVirtualRepoWorkerNumber)\n}\n\nfunc mergeVirtualRepo(args ...interface{}) error {\n\tif len(args) < 1 {\n\t\treturn nil\n\t}\n\trepoID := args[0].(string)\n\tvirtual, err := repomgr.IsVirtualRepo(repoID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif virtual {\n\t\trunningRepoMutex.Lock()\n\t\tif _, ok := runningRepo[repoID]; ok {\n\t\t\tlog.Debugf(\"a task for repo %s is already running\", repoID)\n\t\t\tgo mergeVirtualRepoPool.AddTask(repoID)\n\t\t\trunningRepoMutex.Unlock()\n\t\t\treturn nil\n\t\t}\n\t\trunningRepo[repoID] = struct{}{}\n\t\trunningRepoMutex.Unlock()\n\n\t\terr := mergeRepo(repoID)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"%v\", err)\n\t\t}\n\t\trunningRepoMutex.Lock()\n\t\tdelete(runningRepo, repoID)\n\t\trunningRepoMutex.Unlock()\n\n\t\tgo updateSizePool.AddTask(repoID)\n\n\t\treturn nil\n\t}\n\n\texcludeRepo := \"\"\n\tif len(args) > 1 {\n\t\texcludeRepo = args[1].(string)\n\t}\n\tvRepos, _ := repomgr.GetVirtualRepoIDsByOrigin(repoID)\n\tfor _, id := range vRepos {\n\t\tif id == excludeRepo {\n\t\t\tcontinue\n\t\t}\n\t\trunningRepoMutex.Lock()\n\t\tif _, ok := runningRepo[id]; ok {\n\t\t\tlog.Debugf(\"a task for repo %s is already running\", 
id)\n\t\t\tgo mergeVirtualRepoPool.AddTask(id)\n\t\t\trunningRepoMutex.Unlock()\n\t\t\tcontinue\n\t\t}\n\t\trunningRepo[id] = struct{}{}\n\t\trunningRepoMutex.Unlock()\n\n\t\terr := mergeRepo(id)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"%v\", err)\n\t\t}\n\t\trunningRepoMutex.Lock()\n\t\tdelete(runningRepo, id)\n\t\trunningRepoMutex.Unlock()\n\t}\n\n\tgo updateSizePool.AddTask(repoID)\n\n\treturn nil\n}\n\nfunc mergeRepo(repoID string) error {\n\trepo := repomgr.Get(repoID)\n\tif repo == nil {\n\t\terr := fmt.Errorf(\"failed to get virt repo %.10s\", repoID)\n\t\treturn err\n\t}\n\tvInfo := repo.VirtualInfo\n\tif vInfo == nil {\n\t\treturn nil\n\t}\n\torigRepo := repomgr.Get(vInfo.OriginRepoID)\n\tif origRepo == nil {\n\t\terr := fmt.Errorf(\"failed to get orig repo %.10s\", repoID)\n\t\treturn err\n\t}\n\n\thead, err := commitmgr.Load(repo.ID, repo.HeadCommitID)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to get commit %s:%.8s\", repo.ID, repo.HeadCommitID)\n\t\treturn err\n\t}\n\torigHead, err := commitmgr.Load(origRepo.ID, origRepo.HeadCommitID)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"merge repo %.8s failed: failed to get origin repo commit %s:%.8s\", repoID, origRepo.ID, origRepo.HeadCommitID)\n\t\treturn err\n\t}\n\n\tvar origRoot string\n\torigRoot, err = fsmgr.GetSeafdirIDByPath(origRepo.StoreID, origHead.RootID, vInfo.Path)\n\tif err != nil && !errors.Is(err, fsmgr.ErrPathNoExist) {\n\t\terr := fmt.Errorf(\"merge repo %.10s failed: failed to get seafdir id by path in origin repo %.10s: %v\", repoID, origRepo.StoreID, err)\n\t\treturn err\n\t}\n\tif origRoot == \"\" {\n\t\tnewPath, _ := handleMissingVirtualRepo(origRepo, origHead, vInfo)\n\t\tif newPath != \"\" {\n\t\t\torigRoot, _ = fsmgr.GetSeafdirIDByPath(origRepo.StoreID, origHead.RootID, newPath)\n\t\t}\n\t\tif origRoot == \"\" {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tbase, err := commitmgr.Load(origRepo.ID, vInfo.BaseCommitID)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"merge repo %.8s failed: 
failed to get origin repo commit %s:%.8s\", repoID, origRepo.ID, vInfo.BaseCommitID)\n\t\treturn err\n\t}\n\n\troot := head.RootID\n\tbaseRoot, _ := fsmgr.GetSeafdirIDByPath(origRepo.StoreID, base.RootID, vInfo.Path)\n\tif baseRoot == \"\" {\n\t\terr := fmt.Errorf(\"merge repo %.10s failed: cannot find seafdir for origin repo %.10s path %s\", repoID, vInfo.OriginRepoID, vInfo.Path)\n\t\treturn err\n\t}\n\n\tif root == origRoot {\n\t} else if baseRoot == root {\n\t\t_, err := updateDir(repoID, \"/\", origRoot, origHead.CreatorName, head.CommitID)\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"failed to update root of virtual repo %.10s\", repoID)\n\t\t\treturn err\n\t\t}\n\t\trepomgr.SetVirtualRepoBaseCommitPath(repo.ID, origRepo.HeadCommitID, vInfo.Path)\n\t} else if baseRoot == origRoot {\n\t\tnewBaseCommit, err := updateDir(vInfo.OriginRepoID, vInfo.Path, root, head.CreatorName, origHead.CommitID)\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"merge repo %.8s failed: failed to update origin repo%.10s path %s\", repoID, vInfo.OriginRepoID, vInfo.Path)\n\t\t\treturn err\n\t\t}\n\t\trepomgr.SetVirtualRepoBaseCommitPath(repo.ID, newBaseCommit, vInfo.Path)\n\t\tcleanupVirtualRepos(vInfo.OriginRepoID)\n\t\tmergeVirtualRepo(vInfo.OriginRepoID, repoID)\n\t} else {\n\t\troots := []string{baseRoot, origRoot, root}\n\t\topt := new(mergeOptions)\n\t\topt.remoteRepoID = repoID\n\t\topt.remoteHead = head.CommitID\n\n\t\terr := mergeTrees(origRepo.StoreID, roots, opt)\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"failed to merge\")\n\t\t\treturn err\n\t\t}\n\n\t\t_, err = updateDir(repoID, \"/\", opt.mergedRoot, origHead.CreatorName, head.CommitID)\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"failed to update root of virtual repo %.10s\", repoID)\n\t\t\treturn err\n\t\t}\n\n\t\tnewBaseCommit, err := updateDir(vInfo.OriginRepoID, vInfo.Path, opt.mergedRoot, head.CreatorName, origHead.CommitID)\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"merge repo %.10s failed: failed 
to update origin repo %.10s path %s\", repoID, vInfo.OriginRepoID, vInfo.Path)\n\t\t\treturn err\n\t\t}\n\t\trepomgr.SetVirtualRepoBaseCommitPath(repo.ID, newBaseCommit, vInfo.Path)\n\t\tcleanupVirtualRepos(vInfo.OriginRepoID)\n\t\tmergeVirtualRepo(vInfo.OriginRepoID, repoID)\n\t}\n\n\treturn nil\n}\n\nfunc cleanupVirtualRepos(repoID string) error {\n\trepo := repomgr.Get(repoID)\n\tif repo == nil {\n\t\terr := fmt.Errorf(\"failed to get repo %.10s\", repoID)\n\t\treturn err\n\t}\n\n\thead, err := commitmgr.Load(repo.ID, repo.HeadCommitID)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to load commit %s/%s : %v\", repo.ID, repo.HeadCommitID, err)\n\t\treturn err\n\t}\n\n\tvRepos, err := repomgr.GetVirtualRepoInfoByOrigin(repoID)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to get virtual repo ids by origin repo %.10s\", repoID)\n\t\treturn err\n\t}\n\tfor _, vInfo := range vRepos {\n\t\t_, err := fsmgr.GetSeafdirByPath(repo.StoreID, head.RootID, vInfo.Path)\n\t\tif err != nil {\n\t\t\tif err == fsmgr.ErrPathNoExist {\n\t\t\t\thandleMissingVirtualRepo(repo, head, vInfo)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc handleMissingVirtualRepo(repo *repomgr.Repo, head *commitmgr.Commit, vInfo *repomgr.VRepoInfo) (string, error) {\n\tparent, err := commitmgr.Load(head.RepoID, head.ParentID.String)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to load commit %s/%s : %v\", head.RepoID, head.ParentID.String, err)\n\t\treturn \"\", err\n\t}\n\n\tvar results []*diff.DiffEntry\n\terr = diff.DiffCommits(parent, head, &results, true)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to diff commits\")\n\t\treturn \"\", err\n\t}\n\n\tparPath := vInfo.Path\n\tvar isRenamed bool\n\tvar subPath string\n\tvar returnPath string\n\tfor {\n\t\tvar newPath string\n\t\toldDirID, err := fsmgr.GetSeafdirIDByPath(repo.StoreID, parent.RootID, parPath)\n\t\tif err != nil || oldDirID == \"\" {\n\n\t\t\tif err == fsmgr.ErrPathNoExist 
{\n\t\t\t\trepomgr.DelVirtualRepo(vInfo.RepoID, option.CloudMode)\n\t\t\t}\n\t\t\terr := fmt.Errorf(\"failed to find %s under commit %s in repo %s\", parPath, parent.CommitID, repo.StoreID)\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tfor _, de := range results {\n\t\t\tif de.Status == diff.DiffStatusDirRenamed {\n\t\t\t\tif de.Sha1 == oldDirID {\n\t\t\t\t\tif subPath != \"\" {\n\t\t\t\t\t\tnewPath = filepath.Join(\"/\", de.NewName, subPath)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tnewPath = filepath.Join(\"/\", de.NewName)\n\t\t\t\t\t}\n\t\t\t\t\trepomgr.SetVirtualRepoBaseCommitPath(vInfo.RepoID, head.CommitID, newPath)\n\t\t\t\t\treturnPath = newPath\n\t\t\t\t\tif subPath == \"\" {\n\t\t\t\t\t\tnewName := filepath.Base(newPath)\n\t\t\t\t\t\terr := editRepo(vInfo.RepoID, newName, \"Changed library name\", \"\")\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tlog.Warnf(\"failed to rename repo %s.\\n\", newName)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tisRenamed = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif isRenamed {\n\t\t\tbreak\n\t\t}\n\n\t\tslash := strings.LastIndex(parPath, \"/\")\n\t\tif slash <= 0 {\n\t\t\tbreak\n\t\t}\n\t\tsubPath = filepath.Base(parPath)\n\t\tparPath = filepath.Dir(parPath)\n\t}\n\n\tif !isRenamed {\n\t\trepomgr.DelVirtualRepo(vInfo.RepoID, option.CloudMode)\n\t}\n\n\treturn returnPath, nil\n}\n\nfunc editRepo(repoID, name, desc, user string) error {\n\tif name == \"\" && desc == \"\" {\n\t\terr := fmt.Errorf(\"at least one argument should be non-null\")\n\t\treturn err\n\t}\n\n\tvar retryCnt int\n\tfor retry, err := editRepoNeedRetry(repoID, name, desc, user); err != nil || retry; {\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"failed to edit repo: %v\", err)\n\t\t\treturn err\n\t\t}\n\t\tif retryCnt < 3 {\n\t\t\trandom := rand.Intn(10) + 1\n\t\t\ttime.Sleep(time.Duration(random*100) * time.Millisecond)\n\t\t\tretryCnt++\n\t\t} else {\n\t\t\terr := fmt.Errorf(\"stop edit repo %s after 3 retries\", repoID)\n\t\t\treturn 
err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc editRepoNeedRetry(repoID, name, desc, user string) (bool, error) {\n\trepo := repomgr.Get(repoID)\n\tif repo == nil {\n\t\terr := fmt.Errorf(\"no such library\")\n\t\treturn false, err\n\t}\n\tif name == \"\" {\n\t\tname = repo.Name\n\t}\n\tif desc == \"\" {\n\t\tdesc = repo.Desc\n\t}\n\n\tparent, err := commitmgr.Load(repo.ID, repo.HeadCommitID)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to get commit %s:%s\", repo.ID, repo.HeadCommitID)\n\t\treturn false, err\n\t}\n\n\tif user == \"\" {\n\t\tuser = parent.CreatorName\n\t}\n\n\tcommit := commitmgr.NewCommit(repoID, parent.CommitID, parent.RootID, user, \"Changed library name or description\")\n\trepomgr.RepoToCommit(repo, commit)\n\tcommit.RepoName = name\n\tcommit.RepoDesc = desc\n\n\terr = commitmgr.Save(commit)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"failed to add commit: %v\", err)\n\t\treturn false, err\n\t}\n\n\t_, err = updateBranch(repoID, repo.StoreID, commit.CommitID, parent.CommitID, \"\", false, \"\")\n\tif err != nil {\n\t\treturn true, nil\n\t}\n\n\trepomgr.UpdateRepoInfo(repoID, commit.CommitID)\n\n\treturn true, nil\n}\n"
  },
  {
    "path": "fileserver/workerpool/workerpool.go",
    "content": "package workerpool\n\nimport (\n\t\"runtime/debug\"\n\n\t\"github.com/dgraph-io/ristretto/z\"\n\tlog \"github.com/sirupsen/logrus\"\n)\n\ntype WorkPool struct {\n\tjobs   chan Job\n\tjobCB  JobCB\n\tcloser *z.Closer\n}\n\n// Job is the job object of workpool.\ntype Job struct {\n\tcallback JobCB\n\targs     []interface{}\n}\n\ntype JobCB func(args ...interface{}) error\n\nfunc CreateWorkerPool(jobCB JobCB, n int) *WorkPool {\n\tpool := new(WorkPool)\n\tpool.jobCB = jobCB\n\tpool.jobs = make(chan Job, 100)\n\tpool.closer = z.NewCloser(n)\n\tfor i := 0; i < n; i++ {\n\t\tgo pool.run(pool.jobs)\n\t}\n\treturn pool\n}\n\nfunc (pool *WorkPool) AddTask(args ...interface{}) {\n\tjob := Job{pool.jobCB, args}\n\tpool.jobs <- job\n}\n\nfunc (pool *WorkPool) run(jobs chan Job) {\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\tlog.Errorf(\"panic: %v\\n%s\", err, debug.Stack())\n\t\t}\n\t}()\n\tdefer pool.closer.Done()\n\n\tfor {\n\t\tselect {\n\t\tcase job := <-pool.jobs:\n\t\t\tif job.callback != nil {\n\t\t\t\terr := job.callback(job.args...)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Errorf(\"failed to call jobs: %v.\\n\", err)\n\t\t\t\t}\n\t\t\t}\n\t\tcase <-pool.closer.HasBeenClosed():\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (pool *WorkPool) Shutdown() {\n\tpool.closer.SignalAndWait()\n}\n"
  },
  {
    "path": "fuse/Makefile.am",
    "content": "AM_CFLAGS = -DPKGDATADIR=\\\"$(pkgdatadir)\\\" \\\n\t-DPACKAGE_DATA_DIR=\\\"\"$(pkgdatadir)\"\\\" \\\n\t-DSEAFILE_SERVER \\\n\t-I$(top_srcdir)/include \\\n\t-I$(top_srcdir)/lib \\\n\t-I$(top_builddir)/lib \\\n\t-I$(top_srcdir)/common \\\n\t@SEARPC_CFLAGS@ \\\n\t@GLIB2_CFLAGS@ \\\n\t@FUSE_CFLAGS@ \\\n\t@MYSQL_CFLAGS@ \\\n\t-Wall\n\nbin_PROGRAMS = seaf-fuse\n\nnoinst_HEADERS = seaf-fuse.h seafile-session.h repo-mgr.h\n\nseaf_fuse_SOURCES = seaf-fuse.c \\\n                    seafile-session.c \\\n\t\t    file.c \\\n\t\t    getattr.c \\\n                    readdir.c \\\n                    repo-mgr.c \\\n                    ../common/block-mgr.c \\\n                    ../common/user-mgr.c \\\n                    ../common/group-mgr.c \\\n                    ../common/org-mgr.c \\\n                    ../common/block-backend.c \\\n                    ../common/block-backend-fs.c \\\n                    ../common/branch-mgr.c \\\n                    ../common/commit-mgr.c \\\n                    ../common/fs-mgr.c \\\n                    ../common/log.c \\\n                    ../common/seaf-db.c \\\n                    ../common/seaf-utils.c \\\n                    ../common/obj-store.c \\\n                    ../common/obj-backend-fs.c \\\n                    ../common/obj-backend-riak.c \\\n                    ../common/seafile-crypt.c \\\n                    ../common/password-hash.c\n\nseaf_fuse_LDADD = @GLIB2_LIBS@ @GOBJECT_LIBS@ @SSL_LIBS@ @LIB_RT@ @LIB_UUID@ \\\n                  -lsqlite3 @LIBEVENT_LIBS@ \\\n\t\t  $(top_builddir)/common/cdc/libcdc.la \\\n\t\t  @SEARPC_LIBS@ @JANSSON_LIBS@ @FUSE_LIBS@ @ZLIB_LIBS@ \\\n\t\t  @MYSQL_LIBS@ -lsqlite3 @ARGON2_LIBS@\n\n"
  },
  {
    "path": "fuse/file.c",
    "content": "#include \"common.h\"\n\n#define FUSE_USE_VERSION  26\n#include <fuse.h>\n\n#include <glib.h>\n#include <glib-object.h>\n\n#include <seaf-db.h>\n\n#include \"log.h\"\n#include \"utils.h\"\n\n#include \"seaf-fuse.h\"\n\nint read_file(SeafileSession *seaf,\n              const char *store_id, int version,\n              Seafile *file,\n              char *buf, size_t size,\n              off_t offset, struct fuse_file_info *info)\n{\n    BlockHandle *handle = NULL;;\n    BlockMetadata *bmd;\n    char *blkid;\n    char *ptr;\n    off_t off = 0, nleft;\n    int i, n, ret = -EIO;\n\n    for (i = 0; i < file->n_blocks; i++) {\n        blkid = file->blk_sha1s[i];\n\n        bmd = seaf_block_manager_stat_block(seaf->block_mgr, store_id, version, blkid);\n        if (!bmd)\n            return -EIO;\n\n        if (offset < off + bmd->size) {\n            g_free (bmd);\n            break;\n        }\n\n        off += bmd->size;\n        g_free (bmd);\n    }\n\n    /* beyond the file size */\n    if (i == file->n_blocks)\n        return 0;\n\n    nleft = size;\n    ptr = buf;\n    while (nleft > 0 && i < file->n_blocks) {\n        blkid = file->blk_sha1s[i];\n\n        handle = seaf_block_manager_open_block(seaf->block_mgr,\n                                               store_id, version,\n                                               blkid, BLOCK_READ);\n        if (!handle) {\n            seaf_warning (\"Failed to open block %s:%s.\\n\", store_id, blkid);\n            return -EIO;\n        }\n\n        /* trim the offset in a block */\n        if (offset > off) {\n            char *tmp = (char *)malloc(sizeof(char) * (offset - off));\n            if (!tmp)\n                return -ENOMEM;\n\n            n = seaf_block_manager_read_block(seaf->block_mgr, handle,\n                                              tmp, offset-off);\n            if (n != offset - off) {\n                seaf_warning (\"Failed to read block %s:%s.\\n\", store_id, blkid);\n           
     free (tmp);\n                goto out;\n            }\n\n            off += n;\n            free(tmp);\n        }\n\n        if ((n = seaf_block_manager_read_block(seaf->block_mgr,\n                                               handle, ptr, nleft)) < 0) {\n            seaf_warning (\"Failed to read block %s:%s.\\n\", store_id, blkid);\n            goto out;\n        }\n\n        nleft -= n;\n        ptr += n;\n        off += n;\n        ++i;\n\n        /* At this point we should have read all the content of the block or\n         * have read up to @size bytes. So it's safe to close the block.\n         */\n        seaf_block_manager_close_block(seaf->block_mgr, handle);\n        seaf_block_manager_block_handle_free (seaf->block_mgr, handle);\n    }\n\n    return size - nleft;\n\nout:\n    if (handle) {\n        seaf_block_manager_close_block(seaf->block_mgr, handle);\n        seaf_block_manager_block_handle_free (seaf->block_mgr, handle);\n    }\n    return ret;\n}\n"
  },
  {
    "path": "fuse/getattr.c",
    "content": "#include \"common.h\"\n\n#define FUSE_USE_VERSION  26\n#include <fuse.h>\n\n#include <glib.h>\n#include <glib-object.h>\n\n#include <seaf-db.h>\n\n#include \"log.h\"\n#include \"utils.h\"\n\n#include \"seaf-fuse.h\"\n#include \"seafile-session.h\"\n#include \"seaf-utils.h\"\n\nstatic CcnetEmailUser *get_user_from_ccnet (SearpcClient *client, const char *user)\n{\n    return (CcnetEmailUser *)searpc_client_call__object (client,\n                                       \"get_emailuser\", CCNET_TYPE_EMAIL_USER, NULL,\n                                       1, \"string\", user);\n}\n\nstatic int getattr_root(SeafileSession *seaf, struct stat *stbuf)\n{\n    stbuf->st_mode = S_IFDIR | 0755;\n    stbuf->st_nlink = 2;\n    stbuf->st_size = 4096;\n\n    return 0;\n}\n\nstatic int getattr_user(SeafileSession *seaf, const char *user, struct stat *stbuf)\n{\n    CcnetEmailUser *emailuser;\n\n    emailuser = ccnet_user_manager_get_emailuser (seaf->user_mgr, user, NULL);\n    if (!emailuser) {\n        return -ENOENT;\n    }\n    g_object_unref (emailuser);\n\n    stbuf->st_mode = S_IFDIR | 0755;\n    stbuf->st_nlink = 2;\n    stbuf->st_size = 4096;\n\n    return 0;\n}\n\nstatic int getattr_repo(SeafileSession *seaf,\n                        const char *user, const char *repo_id, const char *repo_path,\n                        struct stat *stbuf)\n{\n    SeafRepo *repo = NULL;\n    SeafBranch *branch;\n    SeafCommit *commit = NULL;\n    guint32 mode = 0;\n    char *id = NULL;\n    int ret = 0;\n\n    repo = seaf_repo_manager_get_repo(seaf->repo_mgr, repo_id);\n    if (!repo) {\n        seaf_warning (\"Failed to get repo %s.\\n\", repo_id);\n        ret = -ENOENT;\n        goto out;\n    }\n\n    branch = repo->head;\n    commit = seaf_commit_manager_get_commit(seaf->commit_mgr,\n                                            repo->id, repo->version,\n                                            branch->commit_id);\n    if (!commit) {\n        seaf_warning (\"Failed 
to get commit %s:%.8s.\\n\", repo->id, branch->commit_id);\n        ret = -ENOENT;\n        goto out;\n    }\n\n    id = seaf_fs_manager_path_to_obj_id(seaf->fs_mgr,\n                                        repo->store_id, repo->version,\n                                        commit->root_id,\n                                        repo_path, &mode, NULL);\n    if (!id) {\n        seaf_warning (\"Path %s doesn't exist in repo %s.\\n\", repo_path, repo_id);\n        ret = -ENOENT;\n        goto out;\n    }\n\n    if (S_ISDIR(mode)) {\n        SeafDir *dir;\n        GList *l;\n        int cnt = 2; /* '.' and '..' */\n\n        dir = seaf_fs_manager_get_seafdir(seaf->fs_mgr,\n                                          repo->store_id, repo->version, id);\n        if (dir) {\n            for (l = dir->entries; l; l = l->next)\n                cnt++;\n        }\n\n        if (strcmp (repo_path, \"/\") != 0) {\n            // get dirent of the dir\n            SeafDirent *dirent = seaf_fs_manager_get_dirent_by_path (seaf->fs_mgr,\n                                                                     repo->store_id,\n                                                                     repo->version,\n                                                                     commit->root_id,\n                                                                     repo_path, NULL);\n            if (dirent && repo->version != 0)\n                stbuf->st_mtime = dirent->mtime;\n\n            seaf_dirent_free (dirent);\n        }\n\n        stbuf->st_size += cnt * sizeof(SeafDirent);\n        stbuf->st_mode = mode | 0755;\n        stbuf->st_nlink = 2;\n\n        seaf_dir_free (dir);\n    } else if (S_ISREG(mode)) {\n        Seafile *file;\n\n        file = seaf_fs_manager_get_seafile(seaf->fs_mgr,\n                                           repo->store_id, repo->version, id);\n        if (file)\n            stbuf->st_size = file->file_size;\n\n        SeafDirent *dirent = 
seaf_fs_manager_get_dirent_by_path (seaf->fs_mgr,\n                                                                 repo->store_id,\n                                                                 repo->version,\n                                                                 commit->root_id,\n                                                                 repo_path, NULL);\n        if (dirent && repo->version != 0)\n            stbuf->st_mtime = dirent->mtime;\n\n        stbuf->st_mode = mode | 0644;\n        stbuf->st_nlink = 1;\n\n        seaf_dirent_free (dirent);\n        seafile_unref (file);\n    } else {\n        return -ENOENT;\n    }\n\nout:\n    g_free (id);\n    seaf_repo_unref (repo);\n    seaf_commit_unref (commit);\n    return ret;\n}\n\nint do_getattr(SeafileSession *seaf, const char *path, struct stat *stbuf)\n{\n    int n_parts;\n    char *user, *repo_id, *repo_path;\n    int ret = 0;\n\n    if (parse_fuse_path (path, &n_parts, &user, &repo_id, &repo_path) < 0) {\n        return -ENOENT;\n    }\n\n    switch (n_parts) {\n    case 0:\n        ret = getattr_root(seaf, stbuf);\n        break;\n    case 1:\n        ret = getattr_user(seaf, user, stbuf);\n        break;\n    case 2:\n    case 3:\n        ret = getattr_repo(seaf, user, repo_id, repo_path, stbuf);\n        break;\n    }\n\n    g_free (user);\n    g_free (repo_id);\n    g_free (repo_path);\n    return ret;\n}\n"
  },
  {
    "path": "fuse/readdir.c",
    "content": "#include \"common.h\"\n\n#define FUSE_USE_VERSION  26\n#include <fuse.h>\n\n#include <glib.h>\n#include <glib-object.h>\n\n#include <seaf-db.h>\n\n#include \"log.h\"\n#include \"utils.h\"\n\n#include \"seaf-fuse.h\"\n#include \"seafile-session.h\"\n#include \"seaf-utils.h\"\n\nstatic char *replace_slash (const char *repo_name)\n{\n    char *ret = g_strdup(repo_name);\n    char *p;\n\n    for (p = ret; *p != 0; ++p)\n        if (*p == '/')\n            *p = '_';\n\n    return ret;\n}\n\nstatic GList *get_users_from_ccnet (SearpcClient *client, const char *source)\n{\n    return searpc_client_call__objlist (client,\n                                        \"get_emailusers\", CCNET_TYPE_EMAIL_USER, NULL,\n                                        3, \"string\", source, \"int\", -1, \"int\", -1);\n}\n\nstatic CcnetEmailUser *get_user_from_ccnet (SearpcClient *client, const char *user)\n{\n    return (CcnetEmailUser *)searpc_client_call__object (client,\n                                       \"get_emailuser\", CCNET_TYPE_EMAIL_USER, NULL,\n                                       1, \"string\", user);\n}\n\nstatic int readdir_root(SeafileSession *seaf,\n                        void *buf, fuse_fill_dir_t filler, off_t offset,\n                        struct fuse_file_info *info)\n{\n    GList *users, *p;\n    CcnetEmailUser *user;\n    const char *email;\n    GHashTable *user_hash;\n    int dummy;\n\n    user_hash = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL);\n\n    users = ccnet_user_manager_get_emailusers (seaf->user_mgr, \"DB\", -1, -1, NULL);\n    for (p = users; p; p = p->next) {\n        user = p->data;\n        email = ccnet_email_user_get_email (user);\n        g_hash_table_insert (user_hash, g_strdup(email), &dummy);\n        g_object_unref (user);\n    }\n    g_list_free (users);\n\n    users = ccnet_user_manager_get_emailusers (seaf->user_mgr, \"LDAPImport\", -1, -1, NULL);\n    for (p = users; p; p = p->next) {\n        user = 
p->data;\n        email = ccnet_email_user_get_email (user);\n        g_hash_table_insert (user_hash, g_strdup(email), &dummy);\n        g_object_unref (user);\n    }\n    g_list_free (users);\n\n    users = g_hash_table_get_keys (user_hash);\n    for (p = users; p; p = p->next) {\n        email = p->data;\n        char *exclude = g_hash_table_lookup (seaf->excluded_users, email);\n        if (exclude)\n            continue;\n        filler (buf, email, NULL, 0);\n    }\n    g_list_free (users);\n\n    g_hash_table_destroy (user_hash);\n\n    return 0;\n}\n\nstatic int readdir_user(SeafileSession *seaf, const char *user,\n                        void *buf, fuse_fill_dir_t filler, off_t offset,\n                        struct fuse_file_info *info)\n{\n    CcnetEmailUser *emailuser;\n    GList *list = NULL, *p;\n    GString *name;\n\n    emailuser = ccnet_user_manager_get_emailuser (seaf->user_mgr, user, NULL);\n    if (!emailuser) {\n        return -ENOENT;\n    }\n    g_object_unref (emailuser);\n\n    list = seaf_repo_manager_get_repos_by_owner (seaf->repo_mgr, user);\n    if (!list) {\n        return 0;\n    }\n\n    for (p = list; p; p = p->next) {\n        SeafRepo *repo = (SeafRepo *)p->data;\n\n        /* Don't list virtual repos. 
*/\n        if (seaf_repo_manager_is_virtual_repo(seaf->repo_mgr, repo->id)) {\n            seaf_repo_unref (repo);\n            continue;\n        }\n\n        // Don't list encrypted repo\n        if (repo->encrypted) {\n            continue;\n        }\n\n        char *clean_repo_name = replace_slash (repo->name);\n\n        name = g_string_new (\"\");\n        g_string_printf (name, \"%s_%s\", repo->id, clean_repo_name);\n        filler(buf, name->str, NULL, 0);\n        g_string_free (name, TRUE);\n        g_free (clean_repo_name);\n\n        seaf_repo_unref (repo);\n    }\n\n    g_list_free (list);\n\n    return 0;\n}\n\nstatic int readdir_repo(SeafileSession *seaf,\n                        const char *user, const char *repo_id, const char *repo_path,\n                        void *buf, fuse_fill_dir_t filler, off_t offset,\n                        struct fuse_file_info *info)\n{\n    SeafRepo *repo = NULL;\n    SeafBranch *branch;\n    SeafCommit *commit = NULL;\n    SeafDir *dir = NULL;\n    GList *l;\n    int ret = 0;\n\n    repo = seaf_repo_manager_get_repo(seaf->repo_mgr, repo_id);\n    if (!repo) {\n        seaf_warning (\"Failed to get repo %s.\\n\", repo_id);\n        ret = -ENOENT;\n        goto out;\n    }\n\n    branch = repo->head;\n    commit = seaf_commit_manager_get_commit(seaf->commit_mgr,\n                                            repo->id, repo->version,\n                                            branch->commit_id);\n    if (!commit) {\n        seaf_warning (\"Failed to get commit %s:%.8s.\\n\", repo->id, branch->commit_id);\n        ret = -ENOENT;\n        goto out;\n    }\n\n    dir = seaf_fs_manager_get_seafdir_by_path(seaf->fs_mgr,\n                                              repo->store_id, repo->version,\n                                              commit->root_id,\n                                              repo_path, NULL);\n    if (!dir) {\n        seaf_warning (\"Path %s doesn't exist in repo %s.\\n\", repo_path, 
repo_id);\n        ret = -ENOENT;\n        goto out;\n    }\n\n    for (l = dir->entries; l; l = l->next) {\n        SeafDirent *seaf_dent = (SeafDirent *) l->data;\n        /* FIXME: maybe we need to return stbuf */\n        filler(buf, seaf_dent->name, NULL, 0);\n    }\n\nout:\n    seaf_repo_unref (repo);\n    seaf_commit_unref (commit);\n    seaf_dir_free (dir);\n    return ret;\n}\n\nint do_readdir(SeafileSession *seaf, const char *path, void *buf,\n               fuse_fill_dir_t filler, off_t offset,\n               struct fuse_file_info *info)\n{\n    int n_parts;\n    char *user, *repo_id, *repo_path;\n    int ret = 0;\n\n    if (parse_fuse_path (path, &n_parts, &user, &repo_id, &repo_path) < 0) {\n        return -ENOENT;\n    }\n\n    switch (n_parts) {\n    case 0:\n        ret = readdir_root(seaf, buf, filler, offset, info);\n        break;\n    case 1:\n        ret = readdir_user(seaf, user, buf, filler, offset, info);\n        break;\n    case 2:\n    case 3:\n        ret = readdir_repo(seaf, user, repo_id, repo_path, buf, filler, offset, info);\n        break;\n    }\n\n    g_free (user);\n    g_free (repo_id);\n    g_free (repo_path);\n    return ret;\n}\n"
  },
  {
    "path": "fuse/repo-mgr.c",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#include \"common.h\"\n#include <glib/gstdio.h>\n\n#include \"utils.h\"\n#include \"log.h\"\n\n#include \"seafile-session.h\"\n#include \"commit-mgr.h\"\n#include \"branch-mgr.h\"\n#include \"repo-mgr.h\"\n#include \"fs-mgr.h\"\n#include \"seafile-error.h\"\n\n#include \"seaf-db.h\"\n\n#define INDEX_DIR \"index\"\n\nstruct _SeafRepoManagerPriv {\n\n};\n\nstatic SeafRepo *\nload_repo (SeafRepoManager *manager, const char *repo_id);\n\ngboolean\nis_repo_id_valid (const char *id)\n{\n    if (!id)\n        return FALSE;\n\n    return is_uuid_valid (id);\n}\n\nSeafRepo*\nseaf_repo_new (const char *id, const char *name, const char *desc)\n{\n    SeafRepo* repo;\n\n    /* valid check */\n  \n    \n    repo = g_new0 (SeafRepo, 1);\n    memcpy (repo->id, id, 36);\n    repo->id[36] = '\\0';\n\n    repo->name = g_strdup(name);\n    repo->desc = g_strdup(desc);\n\n    repo->ref_cnt = 1;\n\n    return repo;\n}\n\nvoid\nseaf_repo_free (SeafRepo *repo)\n{\n    if (repo->name) g_free (repo->name);\n    if (repo->desc) g_free (repo->desc);\n    if (repo->category) g_free (repo->category);\n    if (repo->head) seaf_branch_unref (repo->head);\n    g_free (repo);\n}\n\nvoid\nseaf_repo_ref (SeafRepo *repo)\n{\n    g_atomic_int_inc (&repo->ref_cnt);\n}\n\nvoid\nseaf_repo_unref (SeafRepo *repo)\n{\n    if (!repo)\n        return;\n\n    if (g_atomic_int_dec_and_test (&repo->ref_cnt))\n        seaf_repo_free (repo);\n}\n\nstatic void\nset_head_common (SeafRepo *repo, SeafBranch *branch)\n{\n    if (repo->head)\n        seaf_branch_unref (repo->head);\n    repo->head = branch;\n    seaf_branch_ref(branch);\n}\n\nvoid\nseaf_repo_from_commit (SeafRepo *repo, SeafCommit *commit)\n{\n    repo->name = g_strdup (commit->repo_name);\n    repo->desc = g_strdup (commit->repo_desc);\n    repo->encrypted = commit->encrypted;\n    repo->no_local_history = commit->no_local_history;\n    repo->version = 
commit->version;\n}\n\nvoid\nseaf_repo_to_commit (SeafRepo *repo, SeafCommit *commit)\n{\n    commit->repo_name = g_strdup (repo->name);\n    commit->repo_desc = g_strdup (repo->desc);\n    commit->encrypted = repo->encrypted;\n    commit->no_local_history = repo->no_local_history;\n    commit->version = repo->version;\n}\n\nstatic gboolean\ncollect_commit (SeafCommit *commit, void *vlist, gboolean *stop)\n{\n    GList **commits = vlist;\n\n    /* The traverse function will unref the commit, so we need to ref it.\n     */\n    seaf_commit_ref (commit);\n    *commits = g_list_prepend (*commits, commit);\n    return TRUE;\n}\n\nGList *\nseaf_repo_get_commits (SeafRepo *repo)\n{\n    GList *branches;\n    GList *ptr;\n    SeafBranch *branch;\n    GList *commits = NULL;\n\n    branches = seaf_branch_manager_get_branch_list (seaf->branch_mgr, repo->id);\n    if (branches == NULL) {\n        seaf_warning (\"Failed to get branch list of repo %s.\\n\", repo->id);\n        return NULL;\n    }\n\n    for (ptr = branches; ptr != NULL; ptr = ptr->next) {\n        branch = ptr->data;\n        gboolean res = seaf_commit_manager_traverse_commit_tree (seaf->commit_mgr,\n                                                                 repo->id,\n                                                                 repo->version,\n                                                                 branch->commit_id,\n                                                                 collect_commit,\n                                                                 &commits,\n                                                                 FALSE);\n        if (!res) {\n            for (ptr = commits; ptr != NULL; ptr = ptr->next)\n                seaf_commit_unref ((SeafCommit *)(ptr->data));\n            g_list_free (commits);\n            goto out;\n        }\n    }\n\n    commits = g_list_reverse (commits);\n\nout:\n    for (ptr = branches; ptr != NULL; ptr = ptr->next) {\n        
seaf_branch_unref ((SeafBranch *)ptr->data);\n    }\n    return commits;\n}\n\n#if 0\nstatic int \ncompare_repo (const SeafRepo *srepo, const SeafRepo *trepo)\n{\n    return g_strcmp0 (srepo->id, trepo->id);\n}\n#endif\n\nSeafRepoManager*\nseaf_repo_manager_new (SeafileSession *seaf)\n{\n    SeafRepoManager *mgr = g_new0 (SeafRepoManager, 1);\n\n    mgr->priv = g_new0 (SeafRepoManagerPriv, 1);\n    mgr->seaf = seaf;\n\n    return mgr;\n}\n\nint\nseaf_repo_manager_init (SeafRepoManager *mgr)\n{\n    return 0;\n}\n\nint\nseaf_repo_manager_start (SeafRepoManager *mgr)\n{\n    return 0;\n}\n\nstatic gboolean\nrepo_exists_in_db (SeafDB *db, const char *id)\n{\n    char sql[256];\n    gboolean db_err = FALSE;\n\n    snprintf (sql, sizeof(sql), \"SELECT repo_id FROM Repo WHERE repo_id = '%s'\",\n              id);\n    return seaf_db_check_for_existence (db, sql, &db_err);\n}\n\nSeafRepo*\nseaf_repo_manager_get_repo (SeafRepoManager *manager, const gchar *id)\n{\n    SeafRepo repo;\n    int len = strlen(id);\n\n    if (len >= 37)\n        return NULL;\n\n    memcpy (repo.id, id, len + 1);\n\n    if (repo_exists_in_db (manager->seaf->db, id)) {\n        SeafRepo *ret = load_repo (manager, id);\n        if (!ret)\n            return NULL;\n        /* seaf_repo_ref (ret); */\n        return ret;\n    }\n\n    return NULL;\n}\n\ngboolean\nseaf_repo_manager_repo_exists (SeafRepoManager *manager, const gchar *id)\n{\n    SeafRepo repo;\n    memcpy (repo.id, id, 37);\n\n    return repo_exists_in_db (manager->seaf->db, id);\n}\n\nstatic void\nload_repo_commit (SeafRepoManager *manager,\n                  SeafRepo *repo,\n                  SeafBranch *branch)\n{\n    SeafCommit *commit;\n\n    commit = seaf_commit_manager_get_commit_compatible (manager->seaf->commit_mgr,\n                                                        repo->id,\n                                                        branch->commit_id);\n    if (!commit) {\n        seaf_warning (\"Commit %s is 
missing\\n\", branch->commit_id);\n        repo->is_corrupted = TRUE;\n        return;\n    }\n\n    set_head_common (repo, branch);\n    seaf_repo_from_commit (repo, commit);\n\n    seaf_commit_unref (commit);\n}\n\nstatic gboolean\nload_virtual_info (SeafDBRow *row, void *vrepo_id)\n{\n    char *ret_repo_id = vrepo_id;\n    const char *origin_repo_id;\n\n    origin_repo_id = seaf_db_row_get_column_text (row, 0);\n    memcpy (ret_repo_id, origin_repo_id, 37);\n\n    return FALSE;\n}\n\nchar *\nget_origin_repo_id (SeafRepoManager *mgr, const char *repo_id)\n{\n    char sql[256];\n    char origin_repo_id[37];\n\n    memset (origin_repo_id, 0, 37);\n\n    snprintf (sql, 256,\n              \"SELECT origin_repo FROM VirtualRepo \"\n              \"WHERE repo_id = '%s'\", repo_id);\n    seaf_db_foreach_selected_row (seaf->db, sql, load_virtual_info, origin_repo_id);\n\n    if (origin_repo_id[0] != 0)\n        return g_strdup(origin_repo_id);\n    else\n        return NULL;\n}\n\nstatic SeafRepo *\nload_repo (SeafRepoManager *manager, const char *repo_id)\n{\n    SeafRepo *repo;\n    SeafBranch *branch;\n\n    repo = seaf_repo_new(repo_id, NULL, NULL);\n    if (!repo) {\n        seaf_warning (\"[repo mgr] failed to alloc repo.\\n\");\n        return NULL;\n    }\n\n    repo->manager = manager;\n\n    branch = seaf_branch_manager_get_branch (seaf->branch_mgr, repo_id, \"master\");\n    if (!branch) {\n        seaf_warning (\"Failed to get master branch of repo %.8s.\\n\", repo_id);\n        repo->is_corrupted = TRUE;\n    } else {\n        load_repo_commit (manager, repo, branch);\n        seaf_branch_unref (branch);\n    }\n\n    if (repo->is_corrupted) {\n        seaf_warning (\"Repo %.8s is corrupted.\\n\", repo->id);\n        seaf_repo_free (repo);\n        return NULL;\n    }\n\n    char *origin_repo_id = get_origin_repo_id (manager, repo->id);\n    if (origin_repo_id)\n        memcpy (repo->store_id, origin_repo_id, 36);\n    else\n        memcpy (repo->store_id, 
repo->id, 36);\n    g_free (origin_repo_id);\n\n    return repo;\n}\n\nstatic gboolean\ncollect_repo_id (SeafDBRow *row, void *data)\n{\n    GList **p_ids = data;\n    const char *repo_id;\n\n    repo_id = seaf_db_row_get_column_text (row, 0);\n    *p_ids = g_list_prepend (*p_ids, g_strdup(repo_id));\n\n    return TRUE;\n}\n\nGList *\nseaf_repo_manager_get_repo_id_list (SeafRepoManager *mgr)\n{\n    GList *ret = NULL;\n    char sql[256];\n\n    snprintf (sql, 256, \"SELECT repo_id FROM Repo\");\n\n    if (seaf_db_foreach_selected_row (mgr->seaf->db, sql, \n                                      collect_repo_id, &ret) < 0)\n        return NULL;\n\n    return ret;\n}\n\nGList *\nseaf_repo_manager_get_repo_list (SeafRepoManager *mgr, int start, int limit)\n{\n    GList *id_list = NULL, *ptr;\n    GList *ret = NULL;\n    SeafRepo *repo;\n    char sql[256];\n\n    if (start == -1 && limit == -1)\n        snprintf (sql, 256, \"SELECT repo_id FROM Repo\");\n    else\n        snprintf (sql, 256, \"SELECT repo_id FROM Repo LIMIT %d, %d\", start, limit);\n\n    if (seaf_db_foreach_selected_row (mgr->seaf->db, sql, \n                                      collect_repo_id, &id_list) < 0)\n        return NULL;\n\n    for (ptr = id_list; ptr; ptr = ptr->next) {\n        char *repo_id = ptr->data;\n        repo = seaf_repo_manager_get_repo (mgr, repo_id);\n        if (repo != NULL)\n            ret = g_list_prepend (ret, repo);\n    }\n\n    string_list_free (id_list);\n    return g_list_reverse (ret);\n}\n\nGList *\nseaf_repo_manager_get_repos_by_owner (SeafRepoManager *mgr,\n                                      const char *email)\n{\n    GList *id_list = NULL, *ptr;\n    GList *ret = NULL;\n    char sql[256];\n\n    snprintf (sql, 256, \"SELECT repo_id FROM RepoOwner WHERE owner_id='%s'\",\n              email);\n\n    if (seaf_db_foreach_selected_row (mgr->seaf->db, sql, \n                                      collect_repo_id, &id_list) < 0)\n        return NULL;\n\n    for 
(ptr = id_list; ptr; ptr = ptr->next) {\n        char *repo_id = ptr->data;\n        SeafRepo *repo = seaf_repo_manager_get_repo (mgr, repo_id);\n        if (repo != NULL)\n            ret = g_list_prepend (ret, repo);\n    }\n\n    string_list_free (id_list);\n\n    return ret;\n}\n\ngboolean\nseaf_repo_manager_is_virtual_repo (SeafRepoManager *mgr, const char *repo_id)\n{\n    char sql[256];\n    gboolean db_err;\n\n    snprintf (sql, 256,\n              \"SELECT 1 FROM VirtualRepo WHERE repo_id = '%s'\", repo_id);\n    return seaf_db_check_for_existence (seaf->db, sql, &db_err);\n}\n"
  },
  {
    "path": "fuse/repo-mgr.h",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#ifndef SEAF_REPO_MGR_H\n#define SEAF_REPO_MGR_H\n\n#include <pthread.h>\n\n#include \"seafile-object.h\"\n#include \"commit-mgr.h\"\n#include \"branch-mgr.h\"\n\nstruct _SeafRepoManager;\ntypedef struct _SeafRepo SeafRepo;\n\nstruct _SeafRepo {\n    struct _SeafRepoManager *manager;\n\n    gchar       id[37];\n    gchar      *name;\n    gchar      *desc;\n    gchar      *category;       /* not used yet */\n    gboolean    encrypted;\n    int         enc_version;\n    gchar       magic[33];       /* hash(repo_id + passwd), key stretched. */\n    gboolean    no_local_history;\n\n    SeafBranch *head;\n\n    gboolean    is_corrupted;\n    gboolean    delete_pending;\n    int         ref_cnt;\n\n    int version;\n    /* Used to access fs and block sotre.\n     * This id is different from repo_id when this repo is virtual.\n     * Virtual repos share fs and block store with its origin repo.\n     * However, commit store for each repo is always independent.\n     * So always use repo_id to access commit store.\n     */\n    gchar       store_id[37];\n};\n\ngboolean is_repo_id_valid (const char *id);\n\nSeafRepo* \nseaf_repo_new (const char *id, const char *name, const char *desc);\n\nvoid\nseaf_repo_free (SeafRepo *repo);\n\nvoid\nseaf_repo_ref (SeafRepo *repo);\n\nvoid\nseaf_repo_unref (SeafRepo *repo);\n\ntypedef struct _SeafRepoManager SeafRepoManager;\ntypedef struct _SeafRepoManagerPriv SeafRepoManagerPriv;\n\nstruct _SeafRepoManager {\n    struct _SeafileSession *seaf;\n\n    SeafRepoManagerPriv *priv;\n};\n\nSeafRepoManager* \nseaf_repo_manager_new (struct _SeafileSession *seaf);\n\nint\nseaf_repo_manager_init (SeafRepoManager *mgr);\n\nint\nseaf_repo_manager_start (SeafRepoManager *mgr);\n\nint\nseaf_repo_manager_add_repo (SeafRepoManager *mgr, SeafRepo *repo);\n\nint\nseaf_repo_manager_del_repo (SeafRepoManager *mgr, SeafRepo *repo);\n\nSeafRepo* 
\nseaf_repo_manager_get_repo (SeafRepoManager *manager, const gchar *id);\n\ngboolean\nseaf_repo_manager_repo_exists (SeafRepoManager *manager, const gchar *id);\n\nGList* \nseaf_repo_manager_get_repo_list (SeafRepoManager *mgr, int start, int limit);\n\nGList *\nseaf_repo_manager_get_repo_id_list (SeafRepoManager *mgr);\n\nGList *\nseaf_repo_manager_get_repos_by_owner (SeafRepoManager *mgr,\n                                      const char *email);\n\ngboolean\nseaf_repo_manager_is_virtual_repo (SeafRepoManager *mgr, const char *repo_id);\n\n#endif\n"
  },
  {
    "path": "fuse/seaf-fuse.c",
    "content": "#include \"common.h\"\n\n#include <unistd.h>\n#include <getopt.h>\n\n#define FUSE_USE_VERSION  26\n#include <fuse.h>\n#include <fuse_opt.h>\n\n#include <glib.h>\n#include <glib-object.h>\n\n#include <seaf-db.h>\n\n#include \"log.h\"\n#include \"utils.h\"\n\n#include \"seaf-fuse.h\"\n\nSeafileSession *seaf = NULL;\n\nstatic char *parse_repo_id (const char *repo_id_name)\n{\n    if (strlen(repo_id_name) < 36)\n        return NULL;\n    return g_strndup(repo_id_name, 36);\n}\n\n/*\n * Path format can be:\n * 1. / --> list all users\n * 2. /user --> list libraries owned by user\n * 3. /user/repo-id_name --> list root of the library\n * 4. /user/repo-id_name/repo_path --> list library content\n */\nint parse_fuse_path (const char *path,\n                     int *n_parts, char **user, char **repo_id, char **repo_path)\n{\n    char **tokens;\n    int n;\n    int ret = 0;\n\n    *user = NULL;\n    *repo_id = NULL;\n    *repo_path = NULL;\n\n    if (*path == '/')\n        ++path;\n\n    tokens = g_strsplit (path, \"/\", 3);\n    n = g_strv_length (tokens);\n    *n_parts = n;\n\n    switch (n) {\n    case 0:\n        break;\n    case 1:\n        *user = g_strdup(tokens[0]);\n        break;\n    case 2:\n        *repo_id = parse_repo_id(tokens[1]);\n        if (*repo_id == NULL) {\n            ret = -1;\n            break;\n        }\n        *user = g_strdup(tokens[0]);\n        *repo_path = g_strdup(\"/\");\n        break;\n    case 3:\n        *repo_id = parse_repo_id(tokens[1]);\n        if (*repo_id == NULL) {\n            ret = -1;\n            break;\n        }\n        *user = g_strdup(tokens[0]);\n        *repo_path = g_strdup(tokens[2]);\n        break;\n    }\n\n    g_strfreev (tokens);\n    return ret;\n}\n\nstatic int seaf_fuse_getattr(const char *path, struct stat *stbuf)\n{\n    memset(stbuf, 0, sizeof(struct stat));\n    return do_getattr(seaf, path, stbuf);\n}\n\nstatic int seaf_fuse_readdir(const char *path, void *buf,\n                      
       fuse_fill_dir_t filler, off_t offset,\n                             struct fuse_file_info *info)\n{\n    filler(buf, \".\", NULL, 0);\n    filler(buf, \"..\", NULL, 0);\n\n    return do_readdir(seaf, path, buf, filler, offset, info);\n}\n\nstatic int seaf_fuse_open(const char *path, struct fuse_file_info *info)\n{\n    int n_parts;\n    char *user, *repo_id, *repo_path;\n    SeafRepo *repo = NULL;\n    SeafBranch *branch = NULL;\n    SeafCommit *commit = NULL;\n    guint32 mode = 0;\n    int ret = 0;\n\n    /* Now we only support read-only mode */\n    if ((info->flags & 3) != O_RDONLY)\n        return -EACCES;\n\n    if (parse_fuse_path (path, &n_parts, &user, &repo_id, &repo_path) < 0) {\n        seaf_warning (\"Invalid input path %s.\\n\", path);\n        return -ENOENT;\n    }\n\n    if (n_parts != 2 && n_parts != 3) {\n        seaf_warning (\"Invalid input path for open: %s.\\n\", path);\n        ret = -EACCES;\n        goto out;\n    }\n\n    repo = seaf_repo_manager_get_repo(seaf->repo_mgr, repo_id);\n    if (!repo) {\n        seaf_warning (\"Failed to get repo %s.\\n\", repo_id);\n        ret = -ENOENT;\n        goto out;\n    }\n\n    branch = repo->head;\n    commit = seaf_commit_manager_get_commit(seaf->commit_mgr,\n                                            repo->id,\n                                            repo->version,\n                                            branch->commit_id);\n    if (!commit) {\n        seaf_warning (\"Failed to get commit %s:%.8s.\\n\", repo->id, branch->commit_id);\n        ret = -ENOENT;\n        goto out;\n    }\n\n    char *id = seaf_fs_manager_path_to_obj_id(seaf->fs_mgr,\n                                              repo->store_id, repo->version,\n                                              commit->root_id,\n                                              repo_path, &mode, NULL);\n    if (!id) {\n        seaf_warning (\"Path %s doesn't exist in repo %s.\\n\", repo_path, repo_id);\n        ret = -ENOENT;\n  
      goto out;\n    }\n    g_free (id);\n\n    if (!S_ISREG(mode))\n        return -EACCES;\n\nout:\n    g_free (user);\n    g_free (repo_id);\n    g_free (repo_path);\n    seaf_repo_unref (repo);\n    seaf_commit_unref (commit);\n    return ret;\n}\n\nstatic int seaf_fuse_read(const char *path, char *buf, size_t size,\n                          off_t offset, struct fuse_file_info *info)\n{\n    int n_parts;\n    char *user, *repo_id, *repo_path;\n    SeafRepo *repo = NULL;\n    SeafBranch *branch = NULL;\n    SeafCommit *commit = NULL;\n    Seafile *file = NULL;\n    char *file_id = NULL;\n    int ret = 0;\n\n    /* Now we only support read-only mode */\n    if ((info->flags & 3) != O_RDONLY)\n        return -EACCES;\n\n    if (parse_fuse_path (path, &n_parts, &user, &repo_id, &repo_path) < 0) {\n        seaf_warning (\"Invalid input path %s.\\n\", path);\n        return -ENOENT;\n    }\n\n    if (n_parts != 2 && n_parts != 3) {\n        seaf_warning (\"Invalid input path for open: %s.\\n\", path);\n        ret = -EACCES;\n        goto out;\n    }\n\n    repo = seaf_repo_manager_get_repo(seaf->repo_mgr, repo_id);\n    if (!repo) {\n        seaf_warning (\"Failed to get repo %s.\\n\", repo_id);\n        ret = -ENOENT;\n        goto out;\n    }\n\n    branch = repo->head;\n    commit = seaf_commit_manager_get_commit(seaf->commit_mgr,\n                                            repo->id,\n                                            repo->version,\n                                            branch->commit_id);\n    if (!commit) {\n        seaf_warning (\"Failed to get commit %s:%.8s.\\n\", repo->id, branch->commit_id);\n        ret = -ENOENT;\n        goto out;\n    }\n\n    file_id = seaf_fs_manager_get_seafile_id_by_path(seaf->fs_mgr,\n                                                     repo->store_id, repo->version,\n                                                     commit->root_id,\n                                                     repo_path, NULL);\n    
if (!file_id) {\n        seaf_warning (\"Path %s doesn't exist in repo %s.\\n\", repo_path, repo_id);\n        ret = -ENOENT;\n        goto out;\n    }\n\n    file = seaf_fs_manager_get_seafile(seaf->fs_mgr,\n                                       repo->store_id, repo->version, file_id);\n    if (!file) {\n        ret = -ENOENT;\n        goto out;\n    }\n\n    ret = read_file(seaf, repo->store_id, repo->version,\n                    file, buf, size, offset, info);\n    seafile_unref (file);\n\nout:\n    g_free (user);\n    g_free (repo_id);\n    g_free (repo_path);\n    g_free (file_id);\n    seaf_repo_unref (repo);\n    seaf_commit_unref (commit);\n    return ret;\n}\n\nstruct options {\n    char *central_config_dir;\n    char *config_dir;\n    char *seafile_dir;\n    char *log_file;\n} options;\n\n#define SEAF_FUSE_OPT_KEY(t, p, v) { t, offsetof(struct options, p), v }\n\nenum {\n    KEY_VERSION,\n    KEY_HELP,\n};\n\nstatic struct fuse_opt seaf_fuse_opts[] = {\n    SEAF_FUSE_OPT_KEY(\"-c %s\", config_dir, 0),\n    SEAF_FUSE_OPT_KEY(\"--config %s\", config_dir, 0),\n    SEAF_FUSE_OPT_KEY(\"-F %s\", central_config_dir, 0),\n    SEAF_FUSE_OPT_KEY(\"--central-config-dir %s\", central_config_dir, 0),\n    SEAF_FUSE_OPT_KEY(\"-d %s\", seafile_dir, 0),\n    SEAF_FUSE_OPT_KEY(\"--seafdir %s\", seafile_dir, 0),\n    SEAF_FUSE_OPT_KEY(\"-l %s\", log_file, 0),\n    SEAF_FUSE_OPT_KEY(\"--logfile %s\", log_file, 0),\n\n    FUSE_OPT_KEY(\"-V\", KEY_VERSION),\n    FUSE_OPT_KEY(\"--version\", KEY_VERSION),\n    FUSE_OPT_KEY(\"-h\", KEY_HELP),\n    FUSE_OPT_KEY(\"--help\", KEY_HELP),\n    FUSE_OPT_END\n};\n\nstatic struct fuse_operations seaf_fuse_ops = {\n    .getattr = seaf_fuse_getattr,\n    .readdir = seaf_fuse_readdir,\n    .open    = seaf_fuse_open,\n    .read    = seaf_fuse_read,\n};\n\nint main(int argc, char *argv[])\n{\n    struct fuse_args args = FUSE_ARGS_INIT(argc, argv);\n    const char *debug_str = NULL;\n    char *config_dir = DEFAULT_CONFIG_DIR;\n    char 
*central_config_dir = NULL;\n    char *seafile_dir = NULL;\n    char *logfile = NULL;\n    char *ccnet_debug_level_str = \"info\";\n    char *seafile_debug_level_str = \"debug\";\n    int ret;\n\n    memset(&options, 0, sizeof(struct options));\n\n    if (fuse_opt_parse(&args, &options, seaf_fuse_opts, NULL) == -1) {\n        seaf_warning(\"Parse argument Failed\\n\");\n        exit(1);\n    }\n\n#if !GLIB_CHECK_VERSION(2,36,0)\n    g_type_init();\n#endif\n\n    config_dir = options.config_dir ? : DEFAULT_CONFIG_DIR;\n    config_dir = ccnet_expand_path (config_dir);\n    central_config_dir = options.central_config_dir;\n\n    if (!debug_str)\n        debug_str = g_getenv(\"SEAFILE_DEBUG\");\n    seafile_debug_set_flags_string(debug_str);\n\n    if (!options.seafile_dir)\n        seafile_dir = g_build_filename(config_dir, \"seafile\", NULL);\n    else\n        seafile_dir = options.seafile_dir;\n\n    if (!options.log_file)\n        logfile = g_build_filename(seafile_dir, \"seaf-fuse.log\", NULL);\n    else\n        logfile = options.log_file;\n\n    if (seafile_log_init(logfile, ccnet_debug_level_str,\n                         seafile_debug_level_str, \"seaf-fuse\") < 0) {\n        fprintf (stderr, \"Failed to init log.\\n\");\n        exit(1);\n    }\n\n    seaf = seafile_session_new(central_config_dir, seafile_dir, config_dir);\n    if (!seaf) {\n        seaf_warning(\"Failed to create seafile session.\\n\");\n        exit(1);\n    }\n\n    if (seafile_session_init(seaf) < 0) {\n        seaf_warning(\"Failed to init seafile session.\\n\");\n        exit(1);\n    }\n\n    set_syslog_config (seaf->config);\n\n    ret = fuse_main(args.argc, args.argv, &seaf_fuse_ops, NULL);\n    fuse_opt_free_args(&args);\n    return ret;\n}\n"
  },
  {
    "path": "fuse/seaf-fuse.h",
    "content": "#ifndef SEAF_FUSE_H\n#define SEAF_FUSE_H\n\n#include \"seafile-session.h\"\n\nint parse_fuse_path (const char *path,\n                     int *n_parts, char **user, char **repo_id, char **repo_path);\n\nSeafDirent *\nfuse_get_dirent_by_path (SeafFSManager *mgr,\n                         const char *repo_id,\n                         int version,\n                         const char *root_id,\n                         const char *path);\n\n/* file.c */\nint read_file(SeafileSession *seaf, const char *store_id, int version,\n              Seafile *file, char *buf, size_t size,\n              off_t offset, struct fuse_file_info *info);\n\n/* getattr.c */\nint do_getattr(SeafileSession *seaf, const char *path, struct stat *stbuf);\n\n/* readdir.c */\nint do_readdir(SeafileSession *seaf, const char *path, void *buf,\n               fuse_fill_dir_t filler, off_t offset,\n               struct fuse_file_info *info);\n\n#endif /* SEAF_FUSE_H */\n"
  },
  {
    "path": "fuse/seafile-session.c",
    "content": "#include \"common.h\"\n\n#include <sys/types.h>\n#include <sys/stat.h>\n#include <unistd.h>\n\n#include <utils.h>\n#include <locale.h>\n\n#include \"seafile-session.h\"\n#include \"seaf-utils.h\"\n\n#include \"log.h\"\n\nstatic int\nread_excluded_users (SeafileSession *session);\n\nSeafileSession *\nseafile_session_new(const char *central_config_dir,\n                    const char *seafile_dir,\n                    const char *ccnet_dir)\n{\n    char *abs_central_config_dir = NULL;\n    char *abs_seafile_dir;\n    char *abs_ccnet_dir = NULL;\n    char *tmp_file_dir;\n    char *config_file_path;\n    struct stat st;\n    GKeyFile *config;\n    SeafileSession *session = NULL;\n\n    abs_ccnet_dir = ccnet_expand_path (ccnet_dir);\n    abs_seafile_dir = ccnet_expand_path (seafile_dir);\n    tmp_file_dir = g_build_filename(abs_seafile_dir, \"tmpfiles\", NULL);\n    if (central_config_dir) {\n        abs_central_config_dir = ccnet_expand_path (central_config_dir);\n    }\n    config_file_path = g_build_filename(\n        abs_central_config_dir ? 
abs_central_config_dir : abs_seafile_dir,\n        \"seafile.conf\", NULL);\n\n    if (g_stat(abs_seafile_dir, &st) < 0 || !S_ISDIR(st.st_mode)) {\n        seaf_warning (\"Seafile data dir %s does not exist and is unable to create\\n\",\n                   abs_seafile_dir);\n        goto onerror;\n    }\n\n    if (g_stat(tmp_file_dir, &st) < 0 || !S_ISDIR(st.st_mode)) {\n        seaf_warning(\"Seafile tmp dir %s does not exist and is unable to create\\n\",\n                  tmp_file_dir);\n        goto onerror;\n    }\n\n    if (g_stat(abs_ccnet_dir, &st) < 0 || !S_ISDIR(st.st_mode)) {\n        seaf_warning(\"Ccnet dir %s does not exist and is unable to create\\n\",\n                  abs_ccnet_dir);\n        goto onerror;\n    }\n\n    GError *error = NULL;\n    config = g_key_file_new ();\n    if (!g_key_file_load_from_file (config, config_file_path, \n                                    G_KEY_FILE_NONE, &error)) {\n        seaf_warning (\"Failed to load config file.\\n\");\n        g_free (config_file_path);\n        g_key_file_free (config);\n        goto onerror;\n    }\n    g_free (config_file_path);\n\n    session = g_new0(SeafileSession, 1);\n    session->seaf_dir = abs_seafile_dir;\n    session->ccnet_dir = abs_ccnet_dir;\n    session->tmp_file_dir = tmp_file_dir;\n    session->config = config;\n    session->excluded_users = g_hash_table_new_full (g_str_hash, g_str_equal,\n                                                     g_free, NULL);\n\n    if (load_database_config (session) < 0) {\n        seaf_warning (\"Failed to load database config.\\n\");\n        goto onerror;\n    }\n\n    if (load_ccnet_database_config (session) < 0) {\n        seaf_warning (\"Failed to load ccnet database config.\\n\");\n        goto onerror;\n    }\n\n    if (read_excluded_users (session) < 0) {\n        seaf_warning (\"Failed to load excluded users.\\n\");\n        goto onerror;\n    }\n\n    session->fs_mgr = seaf_fs_manager_new (session, abs_seafile_dir);\n    if 
(!session->fs_mgr)\n        goto onerror;\n    session->block_mgr = seaf_block_manager_new (session, abs_seafile_dir);\n    if (!session->block_mgr)\n        goto onerror;\n    session->commit_mgr = seaf_commit_manager_new (session);\n    if (!session->commit_mgr)\n        goto onerror;\n    session->repo_mgr = seaf_repo_manager_new (session);\n    if (!session->repo_mgr)\n        goto onerror;\n    session->branch_mgr = seaf_branch_manager_new (session);\n    if (!session->branch_mgr)\n        goto onerror;\n    session->user_mgr = ccnet_user_manager_new (session);\n    if (!session->user_mgr)\n        goto onerror;\n    session->group_mgr = ccnet_group_manager_new (session);\n    if (!session->group_mgr)\n        goto onerror;\n\n    return session;\n\nonerror:\n    free (abs_seafile_dir);\n    free (abs_ccnet_dir);\n    g_free (session);\n    return NULL;    \n}\n\nstatic int\nread_excluded_users (SeafileSession *session)\n{\n    char *users;\n    int l, i;\n    char *hash_value;\n\n    users = seaf_key_file_get_string (session->config, \"fuse\", \"excluded_users\", NULL);\n    if (!users)\n        return 0;\n\n    char **parts = g_strsplit_set(users, \" ,\", 0);\n    l = g_strv_length(parts);\n    if (l > 0)\n        hash_value = g_new0(char, 1);\n\n    for (i = 0; i < l; i++) {\n        if (g_strcmp0(parts[i], \"\") == 0)\n            continue;\n        g_hash_table_insert (session->excluded_users, g_strdup(parts[i]), hash_value);\n    }\n\n    g_strfreev (parts);\n    g_free (users);\n\n    return 0;\n}\n\nint\nseafile_session_init (SeafileSession *session)\n{\n    if (seaf_commit_manager_init (session->commit_mgr) < 0)\n        return -1;\n\n    if (seaf_fs_manager_init (session->fs_mgr) < 0)\n        return -1;\n\n    if (seaf_branch_manager_init (session->branch_mgr) < 0)\n        return -1;\n\n    if (seaf_repo_manager_init (session->repo_mgr) < 0)\n        return -1;\n\n    if (ccnet_user_manager_prepare (session->user_mgr) < 0) {\n        seaf_warning 
(\"Failed to init user manager.\\n\");\n        return -1;\n    }\n\n    if (ccnet_group_manager_prepare (session->group_mgr) < 0) {\n        seaf_warning (\"Failed to init group manager.\\n\");\n        return -1;\n    }\n\n    return 0;\n}\n\nint\nseafile_session_start (SeafileSession *session)\n{\n    return 0;\n}\n"
  },
  {
    "path": "fuse/seafile-session.h",
    "content": "#ifndef SEAFILE_SESSION_H\n#define SEAFILE_SESSION_H\n\n#include <stdint.h>\n#include <glib.h>\n\n#include <seaf-db.h>\n\n#include \"block-mgr.h\"\n#include \"fs-mgr.h\"\n#include \"branch-mgr.h\"\n#include \"commit-mgr.h\"\n#include \"repo-mgr.h\"\n#include \"user-mgr.h\"\n#include \"group-mgr.h\"\n#include \"org-mgr.h\"\n\ntypedef struct _SeafileSession SeafileSession;\n\nstruct _SeafileSession {\n    char                *seaf_dir;\n    char                *ccnet_dir;\n    char                *tmp_file_dir;\n    /* Config that's only loaded on start */\n    GKeyFile            *config;\n    SeafDB              *db;\n    SeafDB              *ccnet_db;\n    SeafDB              *seahub_db;\n\n    SeafBlockManager    *block_mgr;\n    SeafFSManager       *fs_mgr;\n    SeafBranchManager   *branch_mgr;\n    SeafCommitManager   *commit_mgr;\n    SeafRepoManager     *repo_mgr;\n    CcnetUserManager    *user_mgr;\n    CcnetGroupManager   *group_mgr;\n    CcnetOrgManager     *org_mgr;\n\n    GHashTable          *excluded_users;\n\n    gboolean create_tables;\n    gboolean ccnet_create_tables;\n};\n\nextern SeafileSession *seaf;\n\nSeafileSession *\nseafile_session_new(const char *central_config_dir,\n                    const char *seafile_dir,\n                    const char *ccnet_dir);\n\nint\nseafile_session_init (SeafileSession *session);\n\nint\nseafile_session_start (SeafileSession *session);\n\n#endif\n"
  },
  {
    "path": "include/Makefile.am",
    "content": "\nnoinst_HEADERS = seafile-rpc.h seafile-error.h\n"
  },
  {
    "path": "include/seafile-error.h",
    "content": "#ifndef SEAFILE_ERROR_H\n#define SEAFILE_ERROR_H\n\n#define SEAF_ERR_GENERAL        500\n#define SEAF_ERR_BAD_REPO       501\n#define SEAF_ERR_BAD_COMMIT     502\n#define SEAF_ERR_BAD_ARGS       503\n#define SEAF_ERR_INTERNAL       504\n#define SEAF_ERR_BAD_FILE       505\n#define SEAF_ERR_BAD_RELAY      506\n#define SEAF_ERR_LIST_COMMITS   507\n#define SEAF_ERR_REPO_AUTH      508\n#define SEAF_ERR_GC_NOT_STARTED 509\n#define SEAF_ERR_MONITOR_NOT_CONNECTED 510\n#define SEAF_ERR_BAD_DIR_ID     511\n#define SEAF_ERR_NO_WORKTREE    512\n#define SEAF_ERR_BAD_PEER_ID    513\n#define SEAF_ERR_REPO_LOCKED    514\n#define SEAF_ERR_DIR_MISSING    515\n#define SEAF_ERR_PATH_NO_EXIST  516 /* the dir or file pointed by this path not exists */\n\n#define POST_FILE_ERR_FILENAME  517\n#define POST_FILE_ERR_BLOCK_MISSING 518\n#define POST_FILE_ERR_QUOTA_FULL 519\n#define SEAF_ERR_CONCURRENT_UPLOAD 520\n#define SEAF_ERR_FILES_WITH_SAME_NAME 521\n#define SEAF_ERR_GC_CONFLICT 522\n\n#endif\n"
  },
  {
    "path": "include/seafile-rpc.h",
    "content": "\n#ifndef _SEAFILE_RPC_H\n#define _SEAFILE_RPC_H\n\n#include \"seafile-object.h\"\n\n/**\n * seafile_get_session_info:\n *\n * Returns: a SeafileSessionInfo object.\n */\nGObject *\nseafile_get_session_info (GError **error);\n\n/**\n * seafile_get_repo_list:\n *\n * Returns repository list.\n */\nGList* seafile_get_repo_list (int start, int limit, const char *order_by, int ret_virt_repo, GError **error);\n\ngint64\nseafile_count_repos (GError **error);\n\n/**\n * seafile_get_trash_repo_list:\n *\n * Returns deleted repository list.\n */\nGList* seafile_get_trash_repo_list(int start, int limit, GError **error);\n\nint\nseafile_del_repo_from_trash (const char *repo_id, GError **error);\n\nint\nseafile_restore_repo_from_trash (const char *repo_id, GError **error);\n\nGList *\nseafile_get_trash_repos_by_owner (const char *owner, GError **error);\n\nint\nseafile_empty_repo_trash (GError **error);\n\nint\nseafile_empty_repo_trash_by_owner (const char *owner, GError **error);\n\n/**\n * seafile_get_commit_list:\n *\n * @limit: if limit <= 0, all commits start from @offset will be returned.\n *\n * Returns: commit list of a given repo.\n *\n * Possible Error:\n *    1. Bad Argument\n *    2. No head and branch master\n *    3. 
Failed to list commits\n */\nGList* seafile_get_commit_list (const gchar *repo,\n                                int offset,\n                                int limit,\n                                GError **error);\n\n/**\n * seafile_get_commit:\n * @id: the commit id.\n *\n * Returns: the commit object.\n */\nGObject* seafile_get_commit (const char *repo_id, int version,\n                             const gchar *id, GError **error);\n\n/**\n * seafile_get_repo:\n *\n * Returns: repo\n */\nGObject* seafile_get_repo (const gchar* id, GError **error);\n\nGObject *\nseafile_get_repo_sync_task (const char *repo_id, GError **error);\n\n/**\n * seafile_get_repo_sync_info:\n */\nGObject *\nseafile_get_repo_sync_info (const char *repo_id, GError **error);\n\nGList*\nseafile_get_repo_sinfo (const char *repo_id, GError **error);\n\n/* [seafile_get_config] returns the value of the config entry whose name is\n * [key] in config.db\n */\nchar *seafile_get_config (const char *key, GError **error);\n\n/* [seafile_set_config] set the value of config key in config.db; old value\n * would be overwritten. 
*/\nint seafile_set_config (const char *key, const char *value, GError **error);\n\nint\nseafile_set_config_int (const char *key, int value, GError **error);\n\nint\nseafile_get_config_int (const char *key, GError **error);\n\nint\nseafile_set_upload_rate_limit (int limit, GError **error);\n\nint\nseafile_set_download_rate_limit (int limit, GError **error);\n\n/**\n * seafile_destroy_repo:\n * @repo_id: repository id.\n */\nint seafile_destroy_repo (const gchar *repo_id, GError **error);\n\nint\nseafile_unsync_repos_by_account (const char *server_addr, const char *email, GError **error);\n\nint\nseafile_remove_repo_tokens_by_account (const char *server_addr, const char *email, GError **error);\n\nint\nseafile_set_repo_token (const char *repo_id, const char *token, GError **error);\n\nint\nseafile_get_download_rate(GError **error);\n\nint\nseafile_get_upload_rate(GError **error);\n\n/**\n * seafile_edit_repo:\n * @repo_id: repository id.\n * @name: new name of the repository, NULL if unchanged.\n * @description: new description of the repository, NULL if unchanged.\n */\nint seafile_edit_repo (const gchar *repo_id, \n\t\t       const gchar *name, \n\t\t       const gchar *description,\n                       const gchar *user,\n\t\t       GError **error);\n\nint\nseafile_change_repo_passwd (const char *repo_id,\n                            const char *old_passwd,\n                            const char *new_passwd,\n                            const char *user,\n                            GError **error);\n\nint\nseafile_upgrade_repo_pwd_hash_algorithm (const char *repo_id,\n                                         const char *user,\n                                         const char *passwd,\n                                         const char *pwd_hash_algo,\n                                         const char *pwd_hash_params,\n                                         GError **error);\n\n/**\n * seafile_repo_size:\n * \n * Returns: the size of a repo\n *\n * 
Possible Error:\n *   1. Bad Argument\n *   2. No local branch (No local branch record in branch.db)\n *   3. Database error\n *   4. Calculate branch size error\n */\ngint64\nseafile_repo_size(const gchar *repo_id, GError **error);\n\nint\nseafile_repo_last_modify(const char *repo_id, GError **error);\n\nint seafile_set_repo_lantoken (const gchar *repo_id,\n                               const gchar *token,\n                               GError **error);\n\ngchar* seafile_get_repo_lantoken (const gchar *repo_id,\n                                  GError **error);\n\nint\nseafile_set_repo_property (const char *repo_id,\n                           const char *key,\n                           const char *value,\n                           GError **error);\n\ngchar *\nseafile_get_repo_property (const char *repo_id,\n                           const char *key,\n                           GError **error);\n\nchar *\nseafile_get_repo_relay_address (const char *repo_id,\n                                GError **error);\n\nchar *\nseafile_get_repo_relay_port (const char *repo_id,\n                             GError **error);\n\nint\nseafile_update_repo_relay_info (const char *repo_id,\n                                const char *new_addr,\n                                const char *new_port,\n                                GError **error);\n\nint\nseafile_update_repos_server_host (const char *old_host,\n                                  const char *new_host,\n                                  const char *new_server_url,\n                                  GError **error);\n\nint seafile_disable_auto_sync (GError **error);\n\nint seafile_enable_auto_sync (GError **error);\n\nint seafile_is_auto_sync_enabled (GError **error);\n\nchar *\nseafile_get_path_sync_status (const char *repo_id,\n                              const char *path,\n                              int is_dir,\n                              GError **error);\n\nint\nseafile_mark_file_locked (const char 
*repo_id, const char *path, GError **error);\n\nint\nseafile_mark_file_unlocked (const char *repo_id, const char *path, GError **error);\n\nchar *\nseafile_get_server_property (const char *server_url, const char *key, GError **error);\n\nint\nseafile_set_server_property (const char *server_url,\n                             const char *key,\n                             const char *value,\n                             GError **error);\n\n/**\n * seafile_list_dir:\n * List a directory.\n *\n * Returns: a list of dirents.\n * \n * @limit: if limit <= 0, all dirents start from @offset will be returned.\n */\nGList * seafile_list_dir (const char *repo_id,\n                          const char *dir_id, int offset, int limit, GError **error);\n\n/**\n * seafile_list_file_blocks:\n * List the blocks of a file.\n *\n * Returns: a list of block ids speprated by '\\n'.\n * \n * @limit: if limit <= 0, all blocks start from @offset will be returned.\n */\nchar * seafile_list_file_blocks (const char *repo_id,\n                                 const char *file_id,\n                                 int offset, int limit,\n                                 GError **error);\n\n/**\n * seafile_list_dir_by_path:\n * List a directory in a commit by the path of the directory.\n *\n * Returns: a list of dirents.\n */\nGList * seafile_list_dir_by_path (const char *repo_id,\n                                  const char *commit_id, const char *path, GError **error);\n\n/**\n * seafile_get_dir_id_by_commit_and_path:\n * Get the dir_id of the path\n *\n * Returns: the dir_id of the path\n */\nchar * seafile_get_dir_id_by_commit_and_path (const char *repo_id,\n                                              const char *commit_id,\n                                              const char *path,\n                                              GError **error);\n\n/**\n * seafile_revert:\n * Reset the repo to a previous state by creating a new commit.\n */\nint seafile_revert (const char *repo_id, 
const char *commit, GError **error);\n\nchar *\nseafile_gen_default_worktree (const char *worktree_parent,\n                              const char *repo_name,\n                              GError **error);\nint\nseafile_check_path_for_clone(const char *path, GError **error);\n\n/**\n * seafile_clone:\n *\n * Fetch a new repo and then check it out.\n */\nchar *\nseafile_clone (const char *repo_id, \n               int repo_version,\n               const char *peer_id,\n               const char *repo_name,\n               const char *worktree,\n               const char *token,\n               const char *passwd,\n               const char *magic,\n               const char *peer_addr,\n               const char *peer_port,\n               const char *email,\n               const char *random_key,\n               int enc_version,\n               const char *more_info,\n               GError **error);\n\nchar *\nseafile_download (const char *repo_id, \n                  int repo_version,\n                  const char *peer_id,\n                  const char *repo_name,\n                  const char *wt_parent,\n                  const char *token,\n                  const char *passwd,\n                  const char *magic,\n                  const char *peer_addr,\n                  const char *peer_port,\n                  const char *email,\n                  const char *random_key,\n                  int enc_version,\n                  const char *more_info,\n                  GError **error);\n\nint\nseafile_cancel_clone_task (const char *repo_id, GError **error);\n\nint\nseafile_remove_clone_task (const char *repo_id, GError **error);\n\n/**\n * seafile_get_clone_tasks:\n *\n * Get a list of clone tasks.\n */\nGList *\nseafile_get_clone_tasks (GError **error);\n\n/**\n * seafile_sync:\n *\n * Sync a repo with relay.\n */\nint seafile_sync (const char *repo_id, const char *peer_id, GError **error);\n\n/**\n * seafile_get_total_block_size:\n *\n * Get the sum of 
size of all the blocks.\n */\ngint64\nseafile_get_total_block_size (GError **error);\n\n\n/**\n * seafile_get_commit_tree_block_number:\n *\n * Get the number of blocks belong to the commit tree.\n *\n * @commit_id: the head of the commit tree.\n *\n * Returns: -1 if the calculation is in progress, -2 if error, >=0 otherwise.\n */\nint\nseafile_get_commit_tree_block_number (const char *commit_id, GError **error);\n\n\n/**\n * seafile_gc:\n * Start garbage collection.\n */\nint\nseafile_gc (GError **error);\n\n/**\n * seafile_gc_get_progress:\n * Get progress of GC.\n *\n * Returns:\n *     progress of GC in precentage.\n *     -1 if GC is not running.\n */\n/* int */\n/* seafile_gc_get_progress (GError **error); */\n\n/* -----------------  Task Related --------------  */\n\n/**\n * seafile_find_transfer:\n *\n * Find a non finished task of a repo\n */\nGObject *\nseafile_find_transfer_task (const char *repo_id, GError *error);\n\n\nint seafile_cancel_task (const gchar *task_id, int task_type, GError **error);\n\n/**\n * Remove finished upload task\n */\nint seafile_remove_task (const char *task_id, int task_type, GError **error);\n\n\n/* ------------------ Relay specific RPC calls. ------------ */\n\n/**\n * seafile_diff:\n *\n * Show the difference between @old commit and @new commit. 
If @old is NULL, then\n * show the difference between @new commit and its parent.\n *\n * @old and @new can also be branch name.\n */\nGList *\nseafile_diff (const char *repo_id, const char *old, const char *new,\n              int fold_dir_results, GError **error);\n\nGList *\nseafile_branch_gets (const char *repo_id, GError **error);\n\n/**\n * Return 1 if user is the owner of repo, otherwise return 0.\n */\nint\nseafile_is_repo_owner (const char *email, const char *repo_id,\n                       GError **error);\n\nint\nseafile_set_repo_owner(const char *repo_id, const char *email,\n                       GError **error);\n\n/**\n * Return owner id of repo\n */\nchar *\nseafile_get_repo_owner(const char *repo_id, GError **error);\n\nGList *\nseafile_get_orphan_repo_list(GError **error);\n\nGList *\nseafile_list_owned_repos (const char *email, int ret_corrupted, int start, int limit,\n                          GError **error);\n\nGList *\nseafile_search_repos_by_name(const char *name, GError **error);\n\n/**\n * seafile_add_chunk_server:\n * @server: ID for the chunk server.\n *\n * Add a chunk server on a relay server.\n */\nint seafile_add_chunk_server (const char *server, GError **error);\n\n/**\n * seafile_del_chunk_server:\n * @server: ID for the chunk server.\n *\n * Delete a chunk server on a relay server.\n */\nint seafile_del_chunk_server (const char *server, GError **error);\n\n/**\n * seafile_list_chunk_servers:\n *\n * List chunk servers set on a relay server.\n */\nchar *seafile_list_chunk_servers (GError **error);\n\ngint64 seafile_get_user_quota_usage (const char *email, GError **error);\n\ngint64 seafile_get_user_share_usage (const char *email, GError **error);\n\ngint64\nseafile_server_repo_size(const char *repo_id, GError **error);\n\nint\nseafile_repo_set_access_property (const char *repo_id, const char *ap,\n                                  GError **error);\n\nchar *\nseafile_repo_query_access_property (const char *repo_id, GError 
**error);\n\nchar *\nseafile_web_get_access_token (const char *repo_id,\n                              const char *obj_id,\n                              const char *op,\n                              const char *username,\n                              int use_onetime,\n                              GError **error);\n\nGObject *\nseafile_web_query_access_token (const char *token, GError **error);\n\nchar *\nseafile_query_zip_progress (const char *token, GError **error);\n\nint\nseafile_cancel_zip_task (const char *token, GError **error);\n\nGObject *\nseafile_get_checkout_task (const char *repo_id, GError **error);\n\nGList *\nseafile_get_sync_task_list (GError **error);\n\nchar *\nseafile_share_subdir_to_user (const char *repo_id,\n                              const char *path,\n                              const char *owner,\n                              const char *share_user,\n                              const char *permission,\n                              const char *passwd,\n                              GError **error);\n\nint\nseafile_unshare_subdir_for_user (const char *repo_id,\n                                 const char *path,\n                                 const char *owner,\n                                 const char *share_user,\n                                 GError **error);\n\nint\nseafile_update_share_subdir_perm_for_user (const char *repo_id,\n                                           const char *path,\n                                           const char *owner,\n                                           const char *share_user,\n                                           const char *permission,\n                                           GError **error);\n\nint\nseafile_add_share (const char *repo_id, const char *from_email,\n                   const char *to_email, const char *permission,\n                   GError **error);\n\nGList *\nseafile_list_share_repos (const char *email, const char *type,\n                          
int start, int limit, GError **error);\n\nGList *\nseafile_list_repo_shared_to (const char *from_user, const char *repo_id,\n                             GError **error);\n\nGList *\nseafile_list_repo_shared_group (const char *from_user, const char *repo_id,\n                                GError **error);\n\nint\nseafile_remove_share (const char *repo_id, const char *from_email,\n                      const char *to_email, GError **error);\n\nchar *\nseafile_share_subdir_to_group (const char *repo_id,\n                               const char *path,\n                               const char *owner,\n                               int share_group,\n                               const char *permission,\n                               const char *passwd,\n                               GError **error);\n\nint\nseafile_unshare_subdir_for_group (const char *repo_id,\n                                  const char *path,\n                                  const char *owner,\n                                  int share_group,\n                                  GError **error);\n\nint\nseafile_update_share_subdir_perm_for_group (const char *repo_id,\n                                            const char *path,\n                                            const char *owner,\n                                            int share_group,\n                                            const char *permission,\n                                            GError **error);\n\nint\nseafile_group_share_repo (const char *repo_id, int group_id,\n                          const char *user_name, const char *permission,\n                          GError **error);\nint\nseafile_group_unshare_repo (const char *repo_id, int group_id,\n                            const char *user_name, GError **error);\n\n/* Get groups that a repo is shared to */\nchar *\nseafile_get_shared_groups_by_repo(const char *repo_id, GError **error);\n\nchar *\nseafile_get_group_repoids (int group_id, GError 
**error);\n\nGList *\nseafile_get_repos_by_group (int group_id, GError **error);\n\nGList *\nseafile_get_group_repos_by_owner (char *user, GError **error);\n\nchar *\nseafile_get_group_repo_owner (const char *repo_id, GError **error);\n\nint\nseafile_remove_repo_group(int group_id, const char *username, GError **error);\n\ngint64\nseafile_get_file_size (const char *store_id, int version,\n                       const char *file_id, GError **error);\n\ngint64\nseafile_get_dir_size (const char *store_id, int version,\n                      const char *dir_id, GError **error);\n\nint\nseafile_set_repo_history_limit (const char *repo_id,\n                                int days,\n                                GError **error);\n\nint\nseafile_get_repo_history_limit (const char *repo_id,\n                                GError **error);\n\nint\nseafile_set_repo_valid_since (const char *repo_id,\n                              gint64 timestamp,\n                              GError **error);\n\nint\nseafile_check_passwd (const char *repo_id,\n                      const char *magic,\n                      GError **error);\n\nint\nseafile_set_passwd (const char *repo_id,\n                    const char *user,\n                    const char *passwd,\n                    GError **error);\n\nint\nseafile_unset_passwd (const char *repo_id,\n                      const char *user,\n                      GError **error);\n\nint\nseafile_is_passwd_set (const char *repo_id, const char *user, GError **error);\n\nGObject *\nseafile_get_decrypt_key (const char *repo_id, const char *user, GError **error);\n\nint\nseafile_revert_on_server (const char *repo_id,\n                          const char *commit_id,\n                          const char *user_name,\n                          GError **error);\n\n/**\n * Add a file into the repo on server.\n * The content of the file is stored in a temporary file.\n * @repo_id: repo id\n * @temp_file_path: local file path, should be a temp 
file just uploaded.\n * @parent_dir: the parent directory to put the file in.\n * @file_name: the name of the target file.\n * @user: the email of the user who uploaded the file.\n */\nint\nseafile_post_file (const char *repo_id, const char *temp_file_path,\n                  const char *parent_dir, const char *file_name,\n                  const char *user,\n                  GError **error);\n\n/**\n * Add multiple files at once.\n *\n * @filenames_json: json array of filenames\n * @paths_json: json array of temp file paths\n */\nchar *\nseafile_post_multi_files (const char *repo_id,\n                          const char *parent_dir,\n                          const char *filenames_json,\n                          const char *paths_json,\n                          const char *user,\n                          int replace,\n                          GError **error);\n\n/**\n * Add file blocks at once.\n *\n * @blocks_json: json array of block ids\n * @paths_json: json array of temp file paths\n */\n/* char * */\n/* seafile_post_file_blocks (const char *repo_id, */\n/*                           const char *parent_dir, */\n/*                           const char *file_name, */\n/*                           const char *blockids_json, */\n/*                           const char *paths_json, */\n/*                           const char *user, */\n/*                           gint64 file_size, */\n/*                           int replace_existed, */\n/*                           GError **error); */\n\n\nint\nseafile_post_empty_file (const char *repo_id, const char *parent_dir,\n                         const char *new_file_name, const char *user,\n                         GError **error);\n\n/**\n * Update an existing file in a repo\n * @params: same as seafile_post_file\n * @head_id: the commit id for the original file version.\n *           It's optional. 
If it's NULL, the current repo head will be used.\n * @return The new file id\n */\nchar *\nseafile_put_file (const char *repo_id, const char *temp_file_path,\n                  const char *parent_dir, const char *file_name,\n                  const char *user, const char *head_id,\n                  GError **error);\n\n/**\n * Add file blocks at once.\n *\n * @blocks_json: json array of block ids\n * @paths_json: json array of temp file paths\n */\n/* char * */\n/* seafile_put_file_blocks (const char *repo_id, const char *parent_dir, */\n/*                          const char *file_name, const char *blockids_json, */\n/*                          const char *paths_json, const char *user, */\n/*                          const char *head_id, gint64 file_size, GError **error); */\n\n\nint\nseafile_post_dir (const char *repo_id, const char *parent_dir,\n                  const char *new_dir_name, const char *user,\n                  GError **error);\nint\nseafile_mkdir_with_parents (const char *repo_id, const char *parent_dir,\n                            const char *new_dir_path, const char *user,\n                            GError **error);\n\n/**\n * delete a file/directory from the repo on server.\n * @repo_id: repo id\n * @parent_dir: the parent directory of the file to be deleted\n * @file_name: the name of the target file.\n * @user: the email of the user who uploaded the file.\n */\nint\nseafile_del_file (const char *repo_id, \n                  const char *parent_dir, const char *file_name,\n                  const char *user,\n                  GError **error);\n\nint\nseafile_batch_del_files (const char *repo_id,\n                         const char *file_list,\n                         const char *user,\n                         GError **error);\n\n/**\n * copy a file/directory from a repo to another on server.\n */\nGObject *\nseafile_copy_file (const char *src_repo_id,\n                   const char *src_dir,\n                   const char 
*src_filename,\n                   const char *dst_repo_id,\n                   const char *dst_dir,\n                   const char *dst_filename,\n                   const char *user,\n                   int need_progress,\n                   int synchronous,\n                   GError **error);\n\n\nGObject *\nseafile_move_file (const char *src_repo_id,\n                   const char *src_dir,\n                   const char *src_filename,\n                   const char *dst_repo_id,\n                   const char *dst_dir,\n                   const char *dst_filename,\n                   int replace,\n                   const char *user,\n                   int need_progress,\n                   int synchronous,\n                   GError **error);\n\nGObject *\nseafile_get_copy_task (const char *task_id, GError **error);\n\nint\nseafile_cancel_copy_task (const char *task_id, GError **error);\n\nint\nseafile_rename_file (const char *repo_id,\n                     const char *parent_dir,\n                     const char *oldname,\n                     const char *newname,\n                     const char *user,\n                     GError **error);\n\n/**\n * Return non-zero if filename is valid.\n */\nint\nseafile_is_valid_filename (const char *repo_id,\n                           const char *filename,\n                           GError **error);\n\n\nint\nseafile_set_user_quota (const char *user, gint64 quota, GError **error);\n\ngint64\nseafile_get_user_quota (const char *user, GError **error);\n\nint\nseafile_check_quota (const char *repo_id, gint64 delta, GError **error);\n\nGList *\nseafile_list_user_quota_usage (GError **error);\n\nchar *\nseafile_get_file_id_by_path (const char *repo_id, const char *path,\n                             GError **error);\n\nchar *\nseafile_get_dir_id_by_path (const char *repo_id, const char *path,\n                            GError **error);\n\nGObject *\nseafile_get_dirent_by_path (const char *repo_id, const char *path,\n  
                          GError **error);\n\n/**\n * Return a list of commits where every commit contains a unique version of\n * the file.\n */\nGList *\nseafile_list_file_revisions (const char *repo_id,\n                             const char *commit_id,\n                             const char *path,\n                             int limit,\n                             GError **error);\n\nGList *\nseafile_calc_files_last_modified (const char *repo_id,\n                                  const char *parent_dir,\n                                  int limit,\n                                  GError **error);\n\nint\nseafile_revert_file (const char *repo_id,\n                     const char *commit_id,\n                     const char *path,\n                     const char *user,\n                     GError **error);\n\nint\nseafile_revert_dir (const char *repo_id,\n                    const char *commit_id,\n                    const char *path,\n                    const char *user,\n                    GError **error);\n\nchar *\nseafile_check_repo_blocks_missing (const char *repo_id,\n                                   const char *blockids_json,\n                                   GError **error);\n\n/*\n * @show_days: return deleted files in how many days, return all if 0.\n */\nGList *\nseafile_get_deleted (const char *repo_id, int show_days,\n                     const char *path, const char *scan_stat,\n                     int limit, GError **error);\n\n/**\n * Generate a new token for (repo_id, email) and return it\n */\nchar *\nseafile_generate_repo_token (const char *repo_id,\n                             const char *email,\n                             GError **error);\n\nint\nseafile_delete_repo_token (const char *repo_id,\n                           const char *token,\n                           const char *user,\n                           GError **error);\n\nGList *\nseafile_list_repo_tokens (const char *repo_id,\n                          
GError **error);\n\nGList *\nseafile_list_repo_tokens_by_email (const char *email,\n                                   GError **error);\n\nint\nseafile_delete_repo_tokens_by_peer_id(const char *email, const char *peer_id, GError **error);\n\nint\nseafile_delete_repo_tokens_by_email (const char *email,\n                                     GError **error);\n\n/**\n * create a repo on seahub\n */\nchar *\nseafile_create_repo (const char *repo_name,\n                     const char *repo_desc,\n                     const char *owner_email,\n                     const char *passwd,\n                     int enc_version,\n                     const char *pwd_hash_algo,\n                     const char *pwd_hash_params,\n                     GError **error);\n\nchar *\nseafile_create_enc_repo (const char *repo_id,\n                         const char *repo_name,\n                         const char *repo_desc,\n                         const char *owner_email,\n                         const char *magic,\n                         const char *random_key,\n                         const char *salt,\n                         int enc_version,\n                         const char *pwd_hash,\n                         const char *pwd_hash_algo,\n                         const char *pwd_hash_params,\n                         GError **error);\n\nchar *\nseafile_check_permission (const char *repo_id, const char *user, GError **error);\n\nchar *\nseafile_check_permission_by_path (const char *repo_id, const char *path,\n                                  const char *user, GError **error);\n\nGList *\nseafile_list_dir_with_perm (const char *repo_id,\n                            const char *path,\n                            const char *dir_id,\n                            const char *user,\n                            int offset,\n                            int limit,\n                            GError **error);\n\nint\nseafile_set_inner_pub_repo (const char *repo_id,\n              
              const char *permission,\n                            GError **error);\n\nint\nseafile_unset_inner_pub_repo (const char *repo_id, GError **error);\n\nGList *\nseafile_list_inner_pub_repos (GError **error);\n\ngint64\nseafile_count_inner_pub_repos (GError **error);\n\nGList *\nseafile_list_inner_pub_repos_by_owner (const char *user, GError **error);\n\nint\nseafile_is_inner_pub_repo (const char *repo_id, GError **error);\n\nint\nseafile_set_share_permission (const char *repo_id,\n                              const char *from_email,\n                              const char *to_email,\n                              const char *permission,\n                              GError **error);\n\nint\nseafile_set_group_repo_permission (int group_id,\n                                   const char *repo_id,\n                                   const char *permission,\n                                   GError **error);\n\nchar *\nseafile_get_file_id_by_commit_and_path(const char *repo_id,\n                                       const char *commit_id,\n                                       const char *path,\n                                       GError **error);\n\n/* virtual repo related */\n\nchar *\nseafile_create_virtual_repo (const char *origin_repo_id,\n                             const char *path,\n                             const char *repo_name,\n                             const char *repo_desc,\n                             const char *owner,\n                             const char *passwd,\n                             GError **error);\n\nGList *\nseafile_get_virtual_repos_by_owner (const char *owner, GError **error);\n\nGObject *\nseafile_get_virtual_repo (const char *origin_repo,\n                          const char *path,\n                          const char *owner,\n                          GError **error);\n\nchar *\nseafile_get_system_default_repo_id (GError **error);\n\n/* Clean trash */\n\nint\nseafile_clean_up_repo_history (const char 
*repo_id, int keep_days, GError **error);\n\n/* ------------------ public RPC calls. ------------ */\n\nGList* seafile_get_repo_list_pub (int start, int limit, GError **error);\n\nGObject* seafile_get_repo_pub (const gchar* id, GError **error);\n\nGList* seafile_get_commit_list_pub (const gchar *repo,\n                                    int offset,\n                                    int limit,\n                                    GError **error);\n\nGObject* seafile_get_commit_pub (const gchar *id, GError **error);\n\nchar *seafile_diff_pub (const char *repo_id, const char *old, const char *new,\n                        GError **error);\n\nGList * seafile_list_dir_pub (const char *dir_id, GError **error);\n\nGList *\nseafile_get_shared_users_for_subdir (const char *repo_id,\n                                     const char *path,\n                                     const char *from_user,\n                                     GError **error);\nGList *\nseafile_get_shared_groups_for_subdir (const char *repo_id,\n                                      const char *path,\n                                      const char *from_user,\n                                      GError **error);\nGObject *\nseafile_generate_magic_and_random_key(int enc_version,\n                                      const char* repo_id,\n                                      const char *passwd,\n                                      GError **error);\n\ngint64\nseafile_get_total_file_number (GError **error);\n\ngint64\nseafile_get_total_storage (GError **error);\n\nGObject *\nseafile_get_file_count_info_by_path (const char *repo_id,\n                                     const char *path,\n                                     GError **error);\n\nchar *\nseafile_get_trash_repo_owner (const char *repo_id, GError **error);\n\nint\nseafile_set_server_config_int (const char *group, const char *key, int value, GError **error);\n\nint\nseafile_get_server_config_int (const char *group, const char *key, 
GError **error);\n\nint\nseafile_set_server_config_int64 (const char *group, const char *key, gint64 value, GError **error);\n\ngint64\nseafile_get_server_config_int64 (const char *group, const char *key, GError **error);\n\nint\nseafile_set_server_config_string (const char *group, const char *key, const char *value, GError **error);\n\nchar *\nseafile_get_server_config_string (const char *group, const char *key, GError **error);\n\nint\nseafile_set_server_config_boolean (const char *group, const char *key, int value, GError **error);\n\nint\nseafile_get_server_config_boolean (const char *group, const char *key, GError **error);\n\nGObject *\nseafile_get_group_shared_repo_by_path (const char *repo_id,\n                                       const char *path,\n                                       int group_id,\n                                       int is_org,\n                                       GError **error);\n\nGObject *\nseafile_get_shared_repo_by_path (const char *repo_id,\n                                 const char *path,\n                                 const char *shared_to,\n                                 int is_org,\n                                 GError **error);\n\nGList *\nseafile_get_group_repos_by_user (const char *user, GError **error);\n\nGList *\nseafile_get_org_group_repos_by_user (const char *user, int org_id, GError **error);\n\nint\nseafile_repo_has_been_shared (const char *repo_id, int including_groups, GError **error);\n\nGList *\nseafile_get_shared_users_by_repo (const char *repo_id, GError **error);\n\nGList *\nseafile_org_get_shared_users_by_repo (int org_id,\n                                      const char *repo_id,\n                                      GError **error);\n\ngint64\nseafile_get_upload_tmp_file_offset (const char *repo_id, const char *file_path,\n                                    GError **error);\n\nchar *\nseafile_convert_repo_path (const char *repo_id,\n                           const char *path,\n       
                    const char *user,\n                           int is_org,\n                           GError **error);\n\nint\nseafile_set_repo_status(const char *repo_id, int status, GError **error);\n\nint\nseafile_get_repo_status(const char *repo_id, GError **error);\n\nGList*\nseafile_get_repos_by_id_prefix  (const char *id_prefix, int start,\n                                 int limit, GError **error);\n\nint\nseafile_publish_event(const char *channel, const char *content, GError **error);\n\njson_t *\nseafile_pop_event(const char *channel, GError **error);\n\nGList *\nseafile_search_files (const char *repo_id, const char *str, GError **error);\n\nGList *\nseafile_search_files_by_path (const char *repo_id, const char *path, const char *str, GError **error);\n\n/*Following is ccnet rpc*/\nint\nccnet_rpc_add_emailuser (const char *email, const char *passwd,\n                         int is_staff, int is_active, GError **error);\n\nint\nccnet_rpc_remove_emailuser (const char *source, const char *email, GError **error);\n\nint\nccnet_rpc_validate_emailuser (const char *email, const char *passwd, GError **error);\n\nGObject*\nccnet_rpc_get_emailuser (const char *email, GError **error);\n\nGObject*\nccnet_rpc_get_emailuser_with_import (const char *email, GError **error);\n\nGObject*\nccnet_rpc_get_emailuser_by_id (int id, GError **error);\n\nGList*\nccnet_rpc_get_emailusers (const char *source, int start, int limit, const char *status, GError **error);\n\nGList*\nccnet_rpc_search_emailusers (const char *source,\n                             const char *email_patt,\n                             int start, int limit,\n                             GError **error);\n\nGList*\nccnet_rpc_search_ldapusers (const char *keyword,\n                            int start, int limit,\n                            GError **error);\n\n/* Get total counts of email users. 
*/\ngint64\nccnet_rpc_count_emailusers (const char *source, GError **error);\n\ngint64\nccnet_rpc_count_inactive_emailusers (const char *source, GError **error);\n\nint\nccnet_rpc_update_emailuser (const char *source, int id, const char* passwd,\n                            int is_staff, int is_active,\n                            GError **error);\n\nint\nccnet_rpc_update_role_emailuser (const char* email, const char* role, GError **error);\n\nGList*\nccnet_rpc_get_superusers (GError **error);\n\nGList *\nccnet_rpc_get_emailusers_in_list(const char *source, const char *user_list, GError **error);\n\nint\nccnet_rpc_update_emailuser_id (const char *old_email, const char *new_email, GError **error);\n\nint\nccnet_rpc_create_group (const char *group_name, const char *user_name,\n                        const char *type, int parent_group_id, GError **error);\n\nint\nccnet_rpc_create_org_group (int org_id, const char *group_name,\n                            const char *user_name, int parent_group_id, GError **error);\n\nint\nccnet_rpc_remove_group (int group_id, GError **error);\n\nint\nccnet_rpc_group_add_member (int group_id, const char *user_name,\n                            const char *member_name, GError **error);\nint\nccnet_rpc_group_remove_member (int group_id, const char *user_name,\n                               const char *member_name, GError **error);\n\nint\nccnet_rpc_group_set_admin (int group_id, const char *member_name,\n                           GError **error);\n\nint\nccnet_rpc_group_unset_admin (int group_id, const char *member_name,\n                           GError **error);\n\nint\nccnet_rpc_set_group_name (int group_id, const char *group_name,\n                          GError **error);\n\nint\nccnet_rpc_quit_group (int group_id, const char *user_name, GError **error);\n\nGList *\nccnet_rpc_get_groups (const char *username, int return_ancestors, GError **error);\n\nGList *\nccnet_rpc_list_all_departments (GError **error);\n\nGList 
*\nccnet_rpc_get_all_groups (int start, int limit, const char *source, GError **error);\n\nGList *\nccnet_rpc_get_ancestor_groups (int group_id, GError **error);\n\nGList *\nccnet_rpc_get_top_groups (int including_org, GError **error);\n\nGList *\nccnet_rpc_get_child_groups (int group_id, GError **error);\n\nGList *\nccnet_rpc_get_descendants_groups(int group_id, GError **error);\n\nGObject *\nccnet_rpc_get_group (int group_id, GError **error);\n\nGList *\nccnet_rpc_get_group_members (int group_id, int start, int limit, GError **error);\n\nGList *\nccnet_rpc_get_members_with_prefix(int group_id, const char *prefix, GError **error);\n\nint\nccnet_rpc_check_group_staff (int group_id, const char *user_name, int in_structure,\n                             GError **error);\n\nint\nccnet_rpc_remove_group_user (const char *user, GError **error);\n\nint\nccnet_rpc_is_group_user (int group_id, const char *user, int in_structure, GError **error);\n\nint\nccnet_rpc_set_group_creator (int group_id, const char *user_name,\n                             GError **error);\n\nGList*\nccnet_rpc_search_groups (const char *group_patt,\n                         int start, int limit,\n                         GError **error);\n\nGList *\nccnet_rpc_get_groups_members (const char *group_ids, GError **error);\n\nGList *\nccnet_rpc_search_group_members (int group_id, const char *pattern, GError **error);\n\nint\nccnet_rpc_create_org (const char *org_name, const char *url_prefix,\n                      const char *creator, GError **error);\n\nint\nccnet_rpc_remove_org (int org_id, GError **error);\n\nGList *\nccnet_rpc_get_all_orgs (int start, int limit, GError **error);\n\ngint64\nccnet_rpc_count_orgs (GError **error);\n\nGObject *\nccnet_rpc_get_org_by_url_prefix (const char *url_prefix, GError **error);\n\nGObject *\nccnet_rpc_get_org_by_id (int org_id, GError **error);\n\nint\nccnet_rpc_add_org_user (int org_id, const char *email, int is_staff,\n                        GError 
**error);\n\nint\nccnet_rpc_remove_org_user (int org_id, const char *email, GError **error);\n\nGList *\nccnet_rpc_get_orgs_by_user (const char *email, GError **error);\n\nGList *\nccnet_rpc_get_org_emailusers (const char *url_prefix, int start , int limit,\n                              GError **error);\n\nint\nccnet_rpc_add_org_group (int org_id, int group_id, GError **error);\n\nint\nccnet_rpc_remove_org_group (int org_id, int group_id, GError **error);\n\nint\nccnet_rpc_is_org_group (int group_id, GError **error);\n\nint\nccnet_rpc_get_org_id_by_group (int group_id, GError **error);\n\nGList *\nccnet_rpc_get_org_groups (int org_id, int start, int limit, GError **error);\n\nGList *\nccnet_rpc_get_org_groups_by_user (const char *user, int org_id, GError **error);\n\nGList *\nccnet_rpc_get_org_top_groups (int org_id, GError **error);\n\nint\nccnet_rpc_org_user_exists (int org_id, const char *email, GError **error);\n\nint\nccnet_rpc_is_org_staff (int org_id, const char *email, GError **error);\n\nint\nccnet_rpc_set_org_staff (int org_id, const char *email, GError **error);\n\nint\nccnet_rpc_unset_org_staff (int org_id, const char *email, GError **error);\n\nint\nccnet_rpc_set_org_name (int org_id, const char *org_name, GError **error);\n\nint\nccnet_rpc_set_reference_id (const char *primary_id, const char *reference_id, GError **error);\n\nchar *\nccnet_rpc_get_primary_id (const char *email, GError **error);\n\n#endif\n"
  },
  {
    "path": "lib/Makefile.am",
    "content": "pcfiles = libseafile.pc\npkgconfig_DATA = $(pcfiles)\npkgconfigdir = $(libdir)/pkgconfig\n\nAM_CPPFLAGS = @GLIB2_CFLAGS@ -I$(top_srcdir)/include \\\n\t-I$(top_srcdir)/lib \\\n\t-I$(top_srcdir)/common \\\n\t@SEARPC_CFLAGS@ \\\n\t@MSVC_CFLAGS@ \\\n\t-Wall\n\nBUILT_SOURCES = gensource\n\n## source file rules\nseafile_object_define = repo.vala commit.vala dirent.vala dir.vala \\\n\ttask.vala branch.vala crypt.vala webaccess.vala seahub.vala copy-task.vala ccnetobj.vala search-result.vala\n\nseafile_object_gen = $(seafile_object_define:.vala=.c)\n\nvalac_gen = ${seafile_object_gen} seafile-object.h\n\nEXTRA_DIST = ${seafile_object_define} rpc_table.py $(pcfiles) vala.stamp\n\nutils_headers = net.h bloom-filter.h utils.h db.h job-mgr.h timer.h\n\nutils_srcs = $(utils_headers:.h=.c)\n\nnoinst_HEADERS = ${utils_headers} include.h\n\nseafiledir = $(includedir)/seafile\nseafile_HEADERS = seafile-object.h\n\nseafile-object.h: ${seafile_object_define}\n\trm -f $@\n\tvalac --pkg posix ${seafile_object_define} -C -H seafile-object.h\n\nDISTCLEANFILES = ${searpc_gen}\n\n## library rules\n\nnoinst_LTLIBRARIES = libseafile_common.la\n\nlibseafile_common_la_SOURCES = ${seafile_object_gen} ${utils_srcs}\nlibseafile_common_la_LDFLAGS = -no-undefined\nlibseafile_common_la_LIBADD = @GLIB2_LIBS@  @GOBJECT_LIBS@ @SSL_LIBS@ -lcrypto @LIB_GDI32@ \\\n\t\t\t\t     @LIB_UUID@ @LIB_WS32@ @LIB_PSAPI@ -lsqlite3 \\\n\t\t\t\t\t @LIBEVENT_LIBS@ @SEARPC_LIBS@ @LIB_SHELL32@ \\\n\t@ZLIB_LIBS@\n\nsearpc_gen = searpc-signature.h searpc-marshal.h\n\ngensource: ${searpc_gen} ${valac_gen}\n\nrpc_table.stamp: ${top_srcdir}/lib/rpc_table.py\n\t@rm -f rpc_table.tmp\n\t@touch rpc_table.tmp\n\t@echo \"[libsearpc]: generating rpc header files\"\n\t@PYTHON@ `which searpc-codegen.py` ${top_srcdir}/lib/rpc_table.py\n\t@echo \"[libsearpc]: done\"\n\t@mv -f rpc_table.tmp $@\n\n${searpc_gen}: rpc_table.stamp\n\nvala.stamp: ${seafile_object_define}\n\trm -f ${seafile_object_gen}\n\t@rm -f 
vala.tmp\n\t@touch vala.tmp\n\tvalac -C --pkg posix $^\n\t@mv -f vala.tmp $@\n\n${seafile_object_gen}: vala.stamp\n\nclean-local:\n\trm -f ${searpc_gen}\n\trm -f rpc_table.pyc\n\trm -f rpc_table.stamp\n\trm -f rpc_table.tmp\n\trm -f vala.tmp vala.stamp ${valac_gen}\n\ninstall-data-local:\nif MACOS\n\tsed -i '' -e \"s|(DESTDIR)|${DESTDIR}|g\" $(pcfiles)\nelse\n\t${SED} -i \"s|(DESTDIR)|${DESTDIR}|g\" $(pcfiles)\nendif\n"
  },
  {
    "path": "lib/bloom-filter.c",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#include <limits.h>\n#include <stdint.h>\n#include <string.h>\n#include <openssl/sha.h>\n#include <assert.h>\n\n#include \"bloom-filter.h\"\n\n#define SETBIT(a, n) (a[n/CHAR_BIT] |= (1<<(n%CHAR_BIT)))\n#define CLEARBIT(a, n) (a[n/CHAR_BIT] &= ~(1<<(n%CHAR_BIT)))\n#define GETBIT(a, n) (a[n/CHAR_BIT] & (1<<(n%CHAR_BIT)))\n\nBloom* bloom_create(size_t size, int k, int counting)\n{\n    Bloom *bloom;\n    size_t csize = 0;\n\n    if (k <=0 || k > 4) return NULL;\n    \n    if ( !(bloom = malloc(sizeof(Bloom))) ) return NULL;\n    if ( !(bloom->a = calloc((size+CHAR_BIT-1)/CHAR_BIT, sizeof(char))) )\n    {\n        free (bloom);\n        return NULL;\n    }\n    if (counting) {\n        csize = size*4;\n        bloom->counters = calloc((csize+CHAR_BIT-1)/CHAR_BIT, sizeof(char));\n        if (!bloom->counters) {\n            free (bloom);\n            return NULL;\n        }\n    }\n\n    bloom->asize = size;\n    bloom->csize = csize;\n    bloom->k = k;\n    bloom->counting = counting;\n\n    return bloom;\n}\n\nint bloom_destroy(Bloom *bloom)\n{\n    free (bloom->a);\n    if (bloom->counting) free (bloom->counters);\n    free (bloom);\n\n    return 0;\n}\n\nstatic void\nincr_bit (Bloom *bf, unsigned int bit_idx)\n{\n    unsigned int char_idx, offset;\n    unsigned char value;\n    unsigned int high;\n    unsigned int low;\n\n    SETBIT (bf->a, bit_idx);\n\n    if (!bf->counting) return;\n\n    char_idx = bit_idx / 2;\n    offset = bit_idx % 2;\n\n    value = bf->counters[char_idx];\n    low = value & 0xF;\n    high = (value & 0xF0) >> 4;\n\n    if (offset == 0) {\n        if (low < 0xF)\n            low++;\n    } else {\n        if (high < 0xF)\n            high++;\n    }\n    value = ((high << 4) | low);\n\n    bf->counters[char_idx] = value;\n}\n\nstatic void\ndecr_bit (Bloom *bf, unsigned int bit_idx)\n{\n    unsigned int char_idx, offset;\n    unsigned char value;\n    
unsigned int high;\n    unsigned int low;\n\n    if (!bf->counting) {\n        CLEARBIT (bf->a, bit_idx);\n        return;\n    }\n\n    char_idx = bit_idx / 2;\n    offset = bit_idx % 2;\n\n    value = bf->counters[char_idx];\n    low = value & 0xF;\n    high = (value & 0xF0) >> 4;\n\n    /* decrement, but once we have reached the max, never go back! */\n    if (offset == 0) {\n        if ((low > 0) && (low < 0xF))\n            low--;\n        if (low == 0) {\n            CLEARBIT (bf->a, bit_idx);\n        }\n    } else {\n        if ((high > 0) && (high < 0xF))\n            high--;\n        if (high == 0) {\n            CLEARBIT (bf->a, bit_idx);\n        }\n    }\n    value = ((high << 4) | low);\n\n    bf->counters[char_idx] = value;\n}\n\nint bloom_add(Bloom *bloom, const char *s)\n{\n    int i;\n    SHA256_CTX c;\n    unsigned char sha256[SHA256_DIGEST_LENGTH];\n    size_t *sha_int = (size_t *)&sha256;\n    \n    SHA256_Init(&c);\n    SHA256_Update(&c, s, strlen(s));\n    SHA256_Final (sha256, &c);\n    \n    for (i=0; i < bloom->k; ++i)\n        incr_bit (bloom, sha_int[i] % bloom->asize);\n\n    return 0;\n}\n\nint bloom_remove(Bloom *bloom, const char *s)\n{\n    int i;\n    SHA256_CTX c;\n    unsigned char sha256[SHA256_DIGEST_LENGTH];\n    size_t *sha_int = (size_t *)&sha256;\n    \n    if (!bloom->counting)\n        return -1;\n\n    SHA256_Init(&c);\n    SHA256_Update(&c, s, strlen(s));\n    SHA256_Final (sha256, &c);\n    \n    for (i=0; i < bloom->k; ++i)\n        decr_bit (bloom, sha_int[i] % bloom->asize);\n\n    return 0;\n}\n\nint bloom_test(Bloom *bloom, const char *s)\n{\n    int i;\n    SHA256_CTX c;\n    unsigned char sha256[SHA256_DIGEST_LENGTH];\n    size_t *sha_int = (size_t *)&sha256;\n    \n    SHA256_Init(&c);\n    SHA256_Update(&c, s, strlen(s));\n    SHA256_Final (sha256, &c);\n    \n    for (i=0; i < bloom->k; ++i)\n        if(!(GETBIT(bloom->a, sha_int[i] % bloom->asize))) return 0;\n\n    return 1;\n}\n"
  },
  {
    "path": "lib/bloom-filter.h",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#ifndef __BLOOM_H__\n#define __BLOOM_H__\n\n#include <stdlib.h>\n\ntypedef struct {\n    size_t          asize;\n    unsigned char  *a;\n    size_t          csize;\n    unsigned char  *counters;\n    int             k;\n    char            counting:1;\n} Bloom;\n\nBloom *bloom_create (size_t size, int k, int counting);\nint bloom_destroy (Bloom *bloom);\nint bloom_add (Bloom *bloom, const char *s);\nint bloom_remove (Bloom *bloom, const char *s);\nint bloom_test (Bloom *bloom, const char *s);\n\n#endif\n"
  },
  {
    "path": "lib/branch.vala",
    "content": "// compile this file with `valac --pkg posix repo.vala -C -H repo.h`\n\nnamespace Seafile {\n\npublic class Branch : Object {\n\n    public string _name;\n    public string name {\n        get { return _name; }\n        set { _name = value; }\n    }\n\n    public string _commit_id;\n    public string commit_id {\n        get { return _commit_id; }\n        set { _commit_id = value; }\n    }\n\n\tpublic string _repo_id;\n\tpublic string repo_id {\n\t\tget { return _repo_id; }\n\t\tset { _repo_id = value; }\n\t}\n}\n\n} // namespace\n"
  },
  {
    "path": "lib/ccnetobj.vala",
    "content": "\n\nnamespace Ccnet {\n\n\npublic class EmailUser : Object {\n\n    public int id { get; set; }\n    public string email { get; set; }\n    public bool is_staff { get; set; }\n    public bool is_active { get; set; }\n    public int64 ctime { get; set; }\n    public string source { get; set; }\n    public string role { get; set; }\n    public string password { get; set; }\n    public string reference_id { get; set; }\n}\n\npublic class Group : Object {\n\n    public int id { get; set; }\n    public string group_name { get; set; }\n    public string creator_name { get; set; }\n    public int64 timestamp { get; set; }\n    public string source { get; set; }\n    public int parent_group_id { get; set; }\n\n}\n\npublic class GroupUser : Object {\n\n    public int group_id { get; set; }\n    public string user_name { get; set; }\n    public int is_staff { get; set; }\n}\n\npublic class Organization : Object {\n\n   public int org_id { get; set; }\n   public string email { get; set; }\n   public int is_staff { get; set; }\n   public string org_name { get; set; }\n   public string url_prefix { get; set; }\n   public string creator { get; set; }\n   public int64 ctime { get; set; }\n   \n}\n\n} // namespace\n"
  },
  {
    "path": "lib/commit.vala",
    "content": "// compile this file with `valac --pkg posix repo.vala -C -H repo.h`\n\nnamespace Seafile {\n\npublic class Commit : Object {\n\n    // _id is for fast access from c code. id is for\n\t// vala to automatically generate a property. Note,\n\t// if a Vala property is start with _, it is not\n\t// translated into a GObject property.\n\tpublic char _id[41];\n\tpublic string id {\n\t\tget { return (string)_id; }\n\t\tset { Posix.memcpy(_id, value, 40); _id[40] = '\\0'; }\n\t}\n\n    public string creator_name { get; set; }\n\n\tpublic string _creator;     // creator\n\tpublic string creator {\n\t\tget { return _creator; }\n\t\tset { _creator = value; }\n\t}\n\n\tpublic string _desc;\t\t// description: what does this commit change\n\tpublic string desc {\n\t\tget { return _desc; }\n\t\tset { _desc = value; }\n\t}\n\n\tpublic int64 _ctime;\t\t// create time\n\tpublic int64 ctime {\n\t\tget { return _ctime; }\n\t\tset { _ctime = value; }\n\t}\n\n\tpublic string parent_id { get; set;}\n\n\tpublic string second_parent_id { get; set; }\n\n\tpublic string _repo_id;\n\tpublic string repo_id {\n\t\tget { return _repo_id; }\n\t\tset { _repo_id = value; }\n\t}\n\n\n\t// A commit point to a file or dir, not both.\n\n\tpublic string _root_id;\n\tpublic string root_id {\n\t\tget { return _root_id; }\n\t\tset { _root_id = value; }\n\t}\n\n\t// Repo data-format version of this commit\n\tpublic int version { get; set; }\n\tpublic bool new_merge { get; set; }\n\tpublic bool conflict { get; set; }\n\n\t// Used for returning file revision\n\tpublic string rev_file_id { get; set; }\n\tpublic int64 rev_file_size { get; set; }\n\t// Set if this commit renames a revision of a file\n\tpublic string rev_renamed_old_path { get; set; }\n\n\tpublic string device_name { get; set; }\n\tpublic string client_version { get; set; }\n\n    //Only used for file history pagination\n    public string next_start_commit { get; set; }\n}\n\n} // namespace\n"
  },
  {
    "path": "lib/copy-task.vala",
    "content": "namespace Seafile {\n\npublic class CopyTask : Object {\n       public int64 done { set; get; }\n       public int64 total { set; get; }\n       public bool canceled { set; get; }\n       public bool failed { set; get; }\n       public string failed_reason { set; get; }\n       public bool successful { set; get; }\n}\n\npublic class CopyResult : Object {\n       public bool background { set; get; }\n       public string task_id { set; get; }\n}\n\n}\n"
  },
  {
    "path": "lib/crypt.vala",
    "content": "namespace Seafile {\n\npublic class CryptKey : Object {\n       public string key { set; get; }\n       public string iv { set; get; }\n}\n\n}\n"
  },
  {
    "path": "lib/db.c",
    "content": "\n#include <glib.h>\n#include <unistd.h>\n\n#include \"db.h\"\n\nint\nsqlite_open_db (const char *db_path, sqlite3 **db)\n{\n    int result;\n    const char *errmsg;\n\n    result = sqlite3_open (db_path, db);\n    if (result) {\n        errmsg = sqlite3_errmsg (*db);\n                                \n        g_warning (\"Couldn't open database:'%s', %s\\n\", \n                   db_path, errmsg ? errmsg : \"no error given\");\n\n        sqlite3_close (*db);\n        return -1;\n    }\n\n    return 0;\n}\n\nint sqlite_close_db (sqlite3 *db)\n{\n    return sqlite3_close (db);\n}\n\nsqlite3_stmt *\nsqlite_query_prepare (sqlite3 *db, const char *sql)\n{\n    sqlite3_stmt *stmt;\n    int result;\n\n    result = sqlite3_prepare_v2 (db, sql, -1, &stmt, NULL);\n\n    if (result != SQLITE_OK) {\n        const gchar *str = sqlite3_errmsg (db);\n\n        g_warning (\"Couldn't prepare query, error:%d->'%s'\\n\\t%s\\n\", \n                   result, str ? str : \"no error given\", sql);\n\n        return NULL;\n    }\n\n    return stmt;\n}\n\nint\nsqlite_query_exec (sqlite3 *db, const char *sql)\n{\n    char *errmsg = NULL;\n    int result;\n\n    result = sqlite3_exec (db, sql, NULL, NULL, &errmsg);\n\n    if (result != SQLITE_OK) {\n        if (errmsg != NULL) {\n            g_warning (\"SQL error: %d - %s\\n:\\t%s\\n\", result, errmsg, sql);\n            sqlite3_free (errmsg);\n        }\n        return -1;\n    }\n\n    return 0;\n}\n\nint\nsqlite_begin_transaction (sqlite3 *db)\n{\n    char *sql = \"BEGIN TRANSACTION;\";\n    return sqlite_query_exec (db, sql);\n}\n\nint\nsqlite_end_transaction (sqlite3 *db)\n{\n    char *sql = \"END TRANSACTION;\";\n    return sqlite_query_exec (db, sql);\n}\n\n\ngboolean\nsqlite_check_for_existence (sqlite3 *db, const char *sql)\n{\n    sqlite3_stmt *stmt;\n    int result;\n\n    stmt = sqlite_query_prepare (db, sql);\n    if (!stmt)\n        return FALSE;\n\n    result = sqlite3_step (stmt);\n    if (result == 
SQLITE_ERROR) {\n        const gchar *str = sqlite3_errmsg (db);\n\n        g_warning (\"Couldn't execute query, error: %d->'%s'\\n\", \n                   result, str ? str : \"no error given\");\n        sqlite3_finalize (stmt);\n        return FALSE;\n    }\n    sqlite3_finalize (stmt);\n\n    if (result == SQLITE_ROW)\n        return TRUE;\n    return FALSE;\n}\n\nint\nsqlite_foreach_selected_row (sqlite3 *db, const char *sql, \n                             SqliteRowFunc callback, void *data)\n{\n    sqlite3_stmt *stmt;\n    int result;\n    int n_rows = 0;\n\n    stmt = sqlite_query_prepare (db, sql);\n    if (!stmt) {\n        return -1;\n    }\n\n    while (1) {\n        result = sqlite3_step (stmt);\n        if (result != SQLITE_ROW)\n            break;\n        n_rows++;\n        if (!callback (stmt, data))\n            break;\n    }\n\n    if (result == SQLITE_ERROR) {\n        const gchar *s = sqlite3_errmsg (db);\n\n        g_warning (\"Couldn't execute query, error: %d->'%s'\\n\",\n                   result, s ? s : \"no error given\");\n        sqlite3_finalize (stmt);\n        return -1;\n    }\n\n    sqlite3_finalize (stmt);\n    return n_rows;\n}\n\nint sqlite_get_int (sqlite3 *db, const char *sql)\n{\n    int ret = -1;\n    int result;\n    sqlite3_stmt *stmt;\n\n    if ( !(stmt = sqlite_query_prepare(db, sql)) )\n        return 0;\n\n    result = sqlite3_step (stmt);\n    if (result == SQLITE_ROW) {\n        ret = sqlite3_column_int (stmt, 0);\n        sqlite3_finalize (stmt);\n        return ret;\n    }\n\n    if (result == SQLITE_ERROR) {\n        const gchar *str = sqlite3_errmsg (db);\n        g_warning (\"Couldn't execute query, error: %d->'%s'\\n\",\n                   result, str ? 
str : \"no error given\");\n        sqlite3_finalize (stmt);\n        return -1;\n    }\n\n    sqlite3_finalize(stmt);\n    return ret;\n}\n\ngint64 sqlite_get_int64 (sqlite3 *db, const char *sql)\n{\n    gint64 ret = -1;\n    int result;\n    sqlite3_stmt *stmt;\n\n    if ( !(stmt = sqlite_query_prepare(db, sql)) )\n        return 0;\n\n    result = sqlite3_step (stmt);\n    if (result == SQLITE_ROW) {\n        ret = sqlite3_column_int64 (stmt, 0);\n        sqlite3_finalize (stmt);\n        return ret;\n    }\n\n    if (result == SQLITE_ERROR) {\n        const gchar *str = sqlite3_errmsg (db);\n        g_warning (\"Couldn't execute query, error: %d->'%s'\\n\",\n                   result, str ? str : \"no error given\");\n        sqlite3_finalize (stmt);\n        return -1;\n    }\n\n    sqlite3_finalize(stmt);\n    return ret;\n}\n\nchar *sqlite_get_string (sqlite3 *db, const char *sql)\n{\n    const char *res = NULL;\n    int result;\n    sqlite3_stmt *stmt;\n    char *ret;\n\n    if ( !(stmt = sqlite_query_prepare(db, sql)) )\n        return NULL;\n\n    result = sqlite3_step (stmt);\n    if (result == SQLITE_ROW) {\n        res = (const char *)sqlite3_column_text (stmt, 0);\n        ret = g_strdup(res);\n        sqlite3_finalize (stmt);\n        return ret;\n    }\n\n    if (result == SQLITE_ERROR) {\n        const gchar *str = sqlite3_errmsg (db);\n        g_warning (\"Couldn't execute query, error: %d->'%s'\\n\",\n                   result, str ? str : \"no error given\");\n        sqlite3_finalize (stmt);\n        return NULL;\n    }\n\n    sqlite3_finalize(stmt);\n    return NULL;\n}\n"
  },
  {
    "path": "lib/db.h",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#ifndef DB_UTILS_H\n#define DB_UTILS_H\n\n#include <sqlite3.h>\n\nint sqlite_open_db (const char *db_path, sqlite3 **db);\n\nint sqlite_close_db (sqlite3 *db);\n\nsqlite3_stmt *sqlite_query_prepare (sqlite3 *db, const char *sql);\n\nint sqlite_query_exec (sqlite3 *db, const char *sql);\nint sqlite_begin_transaction (sqlite3 *db);\nint sqlite_end_transaction (sqlite3 *db);\n\ngboolean sqlite_check_for_existence (sqlite3 *db, const char *sql);\n\ntypedef gboolean (*SqliteRowFunc) (sqlite3_stmt *stmt, void *data);\n\nint\nsqlite_foreach_selected_row (sqlite3 *db, const char *sql, \n                             SqliteRowFunc callback, void *data);\n\nint sqlite_get_int (sqlite3 *db, const char *sql);\n\ngint64 sqlite_get_int64 (sqlite3 *db, const char *sql);\n\nchar *sqlite_get_string (sqlite3 *db, const char *sql);\n\n\n#endif\n"
  },
  {
    "path": "lib/dir.vala",
    "content": "namespace Seafile {\n\n\tpublic class Dir : Object {\n\n\t\t// _id is for fast access from c code. id is for\n\t\t// vala to automatically generate a property. Note,\n\t\t// if a Vala property is start with _, it is not\n\t\t// translated into a GObject property.\n\t\tpublic char _id[41];\n\t\tpublic string id {\n\t\t\tget { return (string)_id; }\n\t\t\tset { Posix.memcpy(_id, value, 40); _id[40] = '\\0'; }\n\t\t}\n\t\t\n\t\tpublic List<Dirent> entries;\n\t\tpublic int version { set; get; }\n\t}\n\n    public class FileCountInfo : Object {\n        public int64 file_count { set; get; }\n        public int64 dir_count { set; get; }\n        public int64 size { set; get; }\n    }\n\n} // namespace\n"
  },
  {
    "path": "lib/dirent.vala",
    "content": "namespace Seafile {\n\npublic class Dirent : Object {\n\n    // _id is for fast access from c code. id is for\n\t// vala to automatically generate a property. Note,\n\t// if a Vala property is start with _, it is not\n\t// translated into a GObject property.\n\tpublic string obj_id { set; get; }\n\n\tpublic string obj_name { set; get; }\n\n\tpublic int mode { set; get; }\n\n\tpublic int version { set; get; }\n\tpublic int64 mtime { set; get; }\n\tpublic int64 size { set; get; }\n\tpublic string modifier { set; get;}\n\n\tpublic string permission { set; get; }\n\n\tpublic bool is_locked { set; get; }\n\tpublic string lock_owner { set; get; }\n\tpublic int64 lock_time { set; get; }\n\n\tpublic bool is_shared { set; get; }\n}\n\npublic class FileLastModifiedInfo : Object {\n\n\tpublic string file_name { set; get; }\n\n    public int64 last_modified { set; get; }\n}\n\n} // namespace\n"
  },
  {
    "path": "lib/file.vala",
    "content": "namespace Seafile {\n\npublic class File : Object {\n\n    // _id is for fast access from c code. id is for\n\t// vala to automatically generate a property. Note,\n\t// if a Vala property is start with _, it is not\n\t// translated into a GObject property.\n\tpublic char _id[41];\n\tpublic string id {\n\t\tget { return (string)_id; }\n\t\tset { Posix.memcpy(_id, id, 40); _id[40] = '\\0'; }\n\t}\n\n\tpublic uint64 size;\n}\n\n} // namespace"
  },
  {
    "path": "lib/include.h",
    "content": "\n#include <config.h>\n#include <stdint.h>\n#include <unistd.h>\n#include <stdlib.h>\n#include <string.h>\n#include <errno.h>\n\n#include <glib.h>\n\n#include \"utils.h\"\n\n#ifndef ccnet_warning\n  #define ccnet_warning(fmt, ...) g_warning( \"%s: \" fmt,  __func__ , ##__VA_ARGS__)\n#endif\n\n#ifndef ccnet_error\n  #define ccnet_error(fmt, ...)   g_error( \"%s: \" fmt,  __func__ , ##__VA_ARGS__)\n#endif\n\n#ifndef ccnet_message\n  #define ccnet_message(fmt, ...) g_message(fmt, ##__VA_ARGS__)\n#endif\n\n#ifndef ccnet_debug\n  #define ccnet_debug(fmt, ...) g_debug(fmt, ##__VA_ARGS__)\n#endif\n\n\n#ifndef ENABLE_DEBUG\n#undef g_debug\n#define g_debug(...)  \n#endif\n"
  },
  {
    "path": "lib/job-mgr.c",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)\n#include <event2/event.h>\n#include <event2/event_compat.h>\n#else\n#include <event.h>\n#endif\n\n#include <string.h>\n#include <stdlib.h>\n#include <stdio.h>\n#include <errno.h>\n\n#define MAX_THREADS 50\n#define MAX_IDLE_THREADS 10\n\n#include \"utils.h\"\n\n#include \"job-mgr.h\"\n\nstruct _CcnetJob {\n    CcnetJobManager *manager;\n\n    int             id;\n    ccnet_pipe_t    pipefd[2];\n\n    JobThreadFunc   thread_func;\n    JobDoneCallback done_func;  /* called when the thread is done */\n    void           *data;\n\n    /* the done callback should only access this field */\n    void           *result;\n};\n\n\nvoid\nccnet_job_manager_remove_job (CcnetJobManager *mgr, int job_id);\n\nstatic void\njob_thread_wrapper (void *vdata, void *unused)\n{\n    CcnetJob *job = vdata;\n\n    \n    job->result = job->thread_func (job->data);\n    if (pipewriten (job->pipefd[1], \"a\", 1) != 1) {\n        g_warning (\"[Job Manager] write to pipe error: %s\\n\", strerror(errno));\n    }\n}\n\nstatic void\njob_done_cb (evutil_socket_t fd, short event, void *vdata)\n{\n    CcnetJob *job = vdata;\n    char buf[1];\n\n    if (pipereadn (job->pipefd[0], buf, 1) != 1) {\n        g_warning (\"[Job Manager] read pipe error: %s\\n\", strerror(errno));\n    }\n    pipeclose (job->pipefd[0]);\n    pipeclose (job->pipefd[1]);\n    if (job->done_func) {\n        job->done_func (job->result);\n    }\n\n    ccnet_job_manager_remove_job (job->manager, job->id);\n}\n\nint\njob_thread_create (CcnetJob *job)\n{\n    if (ccnet_pipe (job->pipefd) < 0) {\n        g_warning (\"pipe error: %s\\n\", strerror(errno));\n        return -1;\n    }\n\n    g_thread_pool_push (job->manager->thread_pool, job, NULL);\n\n#ifndef UNIT_TEST\n    event_once (job->pipefd[0], EV_READ, job_done_cb, job, NULL);\n#endif\n\n    return 
0;\n}\n\nCcnetJob *\nccnet_job_new ()\n{\n    CcnetJob *job;\n\n    job = g_new0 (CcnetJob, 1);\n    return job;\n}\n\nvoid\nccnet_job_free (CcnetJob *job)\n{\n    g_free (job);\n}\n\nCcnetJobManager *\nccnet_job_manager_new (int max_threads)\n{\n    CcnetJobManager *mgr;\n\n    mgr = g_new0 (CcnetJobManager, 1);\n    mgr->jobs = g_hash_table_new_full (g_direct_hash, g_direct_equal,\n                                       NULL, (GDestroyNotify)ccnet_job_free);\n    mgr->thread_pool = g_thread_pool_new (job_thread_wrapper,\n                                          NULL,\n                                          max_threads,\n                                          FALSE,\n                                          NULL);\n    /* g_thread_pool_set_max_unused_threads (MAX_IDLE_THREADS); */\n\n    return mgr;\n}\n\nvoid\nccnet_job_manager_free (CcnetJobManager *mgr)\n{\n    g_hash_table_destroy (mgr->jobs);\n    g_thread_pool_free (mgr->thread_pool, TRUE, FALSE);\n    g_free (mgr);\n}\n\nint\nccnet_job_manager_schedule_job (CcnetJobManager *mgr,\n                               JobThreadFunc func,\n                               JobDoneCallback done_func,\n                               void *data)\n{\n    CcnetJob *job = ccnet_job_new ();\n    job->id = mgr->next_job_id++;\n    job->manager = mgr;\n    job->thread_func = func;\n    job->done_func = done_func;\n    job->data = data;\n    \n    g_hash_table_insert (mgr->jobs, (gpointer)(long)job->id, job);\n\n    if (job_thread_create (job) < 0) {\n        g_hash_table_remove (mgr->jobs, (gpointer)(long)job->id);\n        return -1;\n    }\n\n    return job->id;\n}\n\nvoid\nccnet_job_manager_remove_job (CcnetJobManager *mgr, int job_id)\n{\n    g_hash_table_remove (mgr->jobs, (gpointer)(long)job_id);\n}\n\n#ifdef UNIT_TEST\nvoid\nccnet_job_manager_wait_job (CcnetJobManager *mgr, int job_id)\n{\n    CcnetJob *job;\n    \n    job = g_hash_table_lookup (mgr->jobs, (gpointer)(long)job_id);\n    /* manually call 
job_done_cb */\n    job_done_cb (0, 0, (void *)job);\n}\n#endif\n"
  },
  {
    "path": "lib/job-mgr.h",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n/**\n * Job Manager manages long term jobs. These jobs are run in their\n * own threads.\n */\n\n#ifndef JOB_MGR_H\n#define JOB_MGR_H\n\n#include <glib.h>\n\nstruct _CcnetSession;\n\ntypedef struct _CcnetJob CcnetJob;\ntypedef struct _CcnetJobManager CcnetJobManager;\n\n/*\n  The thread func should return the result back by\n     return (void *)result;\n  The result will be passed to JobDoneCallback.\n */\ntypedef void* (*JobThreadFunc)(void *data);\ntypedef void (*JobDoneCallback)(void *result);\n\n\nstruct _CcnetJobManager {\n    GHashTable      *jobs;\n\n    GThreadPool     *thread_pool;\n\n    int              next_job_id;\n};\n\nvoid\nccnet_job_cancel (CcnetJob *job);\n\nCcnetJobManager *\nccnet_job_manager_new (int max_threads);\n\nvoid\nccnet_job_manager_free (CcnetJobManager *mgr);\n\nint\nccnet_job_manager_schedule_job (CcnetJobManager *mgr,\n                                JobThreadFunc func,\n                                JobDoneCallback done_func,\n                                void *data);\n\n/** \n * Wait a specific job to be done.\n */\nvoid\nccnet_job_manager_wait_job (CcnetJobManager *mgr, int job_id);\n\n\n#endif\n"
  },
  {
    "path": "lib/libseafile.pc.in",
    "content": "prefix=(DESTDIR)@prefix@\nexec_prefix=@exec_prefix@\nlibdir=@libdir@\nincludedir=@includedir@\n\nName: libseafile\nDescription: Client library for accessing seafile service.\nVersion: @VERSION@\nLibs: -L${libdir} -lseafile @SEARPC_LIBS@\nCflags: -I${includedir} @SEARPC_CFLAGS@\nRequires: gobject-2.0 glib-2.0\n"
  },
  {
    "path": "lib/net.c",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n#ifdef WIN32\n    #define WINVER 0x0501\n    #include <inttypes.h>\n    #include <winsock2.h>\n    #include <ctype.h>\n    #include <ws2tcpip.h>\n#endif\n#include \"include.h\"\n\n#include <unistd.h>\n#include <stdio.h>\n#include <string.h>\n#include <stdlib.h>\n#include <string.h>\n\n\n#ifdef WIN32\n    #define UNUSED \n#else\n    #include <sys/types.h>\n    #include <sys/socket.h>\n    #include <sys/ioctl.h>\n    #include <netinet/in.h>\n    #include <arpa/inet.h>\n    #include <netdb.h>\n    #include <sys/un.h>\n    #include <net/if.h>\n    #include <netinet/tcp.h>\n#endif\n\n#include <fcntl.h>\n\n#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)\n#include <event2/util.h>\n#else\n#include <evutil.h>\n#endif\n\n#include \"net.h\"\n\n\n#ifdef WIN32\n\n#ifndef inet_aton\nint inet_aton(const char *string, struct in_addr *addr)\n{\n    addr->s_addr = inet_addr(string);\n    if (addr->s_addr != -1 || strcmp(\"255.255.255.255\", string) == 0)\n        return 1;\n    return 0;\n}\n#endif\n\n#endif //WIN32\n\nint\nccnet_netSetTOS (evutil_socket_t s, int tos)\n{\n#ifdef IP_TOS\n    return setsockopt( s, IPPROTO_IP, IP_TOS, (char*)&tos, sizeof( tos ) );\n#else\n    return 0;\n#endif\n}\n\nstatic evutil_socket_t\nmakeSocketNonBlocking (evutil_socket_t fd)\n{\n    if (fd >= 0)\n    {\n        if (evutil_make_socket_nonblocking(fd))\n        {\n            ccnet_warning (\"Couldn't make socket nonblock: %s\",\n                           evutil_socket_error_to_string(EVUTIL_SOCKET_ERROR()));\n            evutil_closesocket(fd);\n            fd = -1;\n        }\n    }\n    return fd;\n}\n\nstatic evutil_socket_t\ncreateSocket (int family, int nonblock)\n{\n    evutil_socket_t fd;\n    int ret;\n\n    fd = socket (family, SOCK_STREAM, 0);\n\n    if (fd < 0) {\n        ccnet_warning(\"create Socket failed %d\\n\", fd);\n    } else if (nonblock) {\n        int 
nodelay = 1;\n\n        fd = makeSocketNonBlocking( fd );\n\n        ret = setsockopt (fd, IPPROTO_TCP, TCP_NODELAY,\n                          (char *)&nodelay, sizeof(nodelay));\n        if (ret < 0) {\n            ccnet_warning(\"setsockopt failed\\n\");\n            evutil_closesocket(fd);\n            return -1;\n        }\n    }\n\n    return fd;\n}\n\nevutil_socket_t\nccnet_net_open_tcp (const struct sockaddr *sa, int nonblock)\n{\n    evutil_socket_t s;\n    int sa_len;\n\n    if( (s = createSocket(sa->sa_family, nonblock)) < 0 )\n        return -1;\n\n#ifndef WIN32\n    if (sa->sa_family == AF_INET)\n        sa_len = sizeof (struct sockaddr_in); \n    else\n        sa_len = sizeof (struct sockaddr_in6);\n#else\n    if (sa->sa_family == AF_INET)\n        sa_len = sizeof (struct sockaddr_in); \n    else\n        return -1;\n#endif\n\n\n    if( (connect(s, sa, sa_len) < 0)\n#ifdef WIN32\n        && (sockerrno != WSAEWOULDBLOCK)\n#endif\n        && (sockerrno != EINPROGRESS) )\n    {\n        evutil_closesocket(s);\n        s = -1;\n    }\n\n    return s;\n}\n\nevutil_socket_t\nccnet_net_bind_tcp (int port, int nonblock)\n{\n#ifndef WIN32\n    int sockfd, n;\n    struct addrinfo hints, *res, *ressave;\n    char buf[10];\n        \n    memset (&hints, 0,sizeof (struct addrinfo));\n    hints.ai_flags = AI_PASSIVE;\n    hints.ai_family = AF_UNSPEC;\n    hints.ai_socktype = SOCK_STREAM;\n\n    snprintf (buf, sizeof(buf), \"%d\", port);\n\n    if ( (n = getaddrinfo(NULL, buf, &hints, &res) ) != 0) {\n        ccnet_warning (\"getaddrinfo fails: %s\\n\", gai_strerror(n));\n        return -1;\n    }\n\n    ressave = res;\n    \n    do {\n        int on = 1;\n\n        sockfd = socket(res->ai_family, res->ai_socktype, res->ai_protocol);\n        if (sockfd < 0)\n            continue;       /* error - try next one */\n\n\t\tif (setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)) < 0) {\n\t\t\tccnet_warning (\"setsockopt of SO_REUSEADDR error\\n\");\n           
 continue;\n        }\n\n        if (nonblock)\n            sockfd = makeSocketNonBlocking (sockfd);\n        if (sockfd < 0)\n            continue;       /* error - try next one */\n\n        if (bind(sockfd, res->ai_addr, res->ai_addrlen) == 0)\n            break;          /* success */\n\n        close(sockfd);      /* bind error - close and try next one */\n    } while ( (res = res->ai_next) != NULL);\n\n    freeaddrinfo (ressave);\n\n    if (res == NULL) {\n        ccnet_warning (\"bind fails: %s\\n\", strerror(errno));\n        return -1;\n    }\n\n    return sockfd;\n#else\n\n    evutil_socket_t s;\n    struct sockaddr_in sock;\n    const int type = AF_INET;\n#if defined( SO_REUSEADDR ) || defined( SO_REUSEPORT )\n    int optval;\n#endif\n\n    if ((s = createSocket(type, nonblock)) < 0)\n        return -1;\n\n    optval = 1;\n    setsockopt (s, SOL_SOCKET, SO_REUSEADDR, (char*)&optval, sizeof(optval));\n\n    memset(&sock, 0, sizeof(sock));\n    sock.sin_family      = AF_INET;\n    sock.sin_addr.s_addr = INADDR_ANY;\n    sock.sin_port        = htons(port);\n\n    if ( bind(s, (struct sockaddr *)&sock, sizeof(struct sockaddr_in)) < 0)\n    {\n        ccnet_warning (\"bind fails: %s\\n\", strerror(errno));\n        evutil_closesocket (s);\n        return -1;\n    }\n    if (nonblock)\n        s = makeSocketNonBlocking (s);\n     \n    return s;\n#endif\n}\n\nint\nccnet_net_make_socket_blocking(evutil_socket_t fd)\n{\n#ifdef WIN32\n\t{\n\t\tu_long nonblocking = 0;\n\t\tif (ioctlsocket(fd, FIONBIO, &nonblocking) == SOCKET_ERROR) {\n\t\t\tccnet_warning (\"fcntl(%d, F_GETFL)\", (int)fd);\n\t\t\treturn -1;\n\t\t}\n\t}\n#else\n\t{\n\t\tint flags;\n\t\tif ((flags = fcntl(fd, F_GETFL, NULL)) < 0) {\n\t\t\tccnet_warning (\"fcntl(%d, F_GETFL)\", fd);\n\t\t\treturn -1;\n\t\t}\n\t\tif (fcntl(fd, F_SETFL, flags & ~O_NONBLOCK) == -1) {\n\t\t\tccnet_warning (\"fcntl(%d, F_SETFL)\", fd);\n\t\t\treturn -1;\n\t\t}\n\t}\n#endif\n\treturn 
0;\n}\n\nevutil_socket_t\nccnet_net_accept (evutil_socket_t b, struct sockaddr_storage *cliaddr, \n                  socklen_t *len, int nonblock)\n{\n    evutil_socket_t s;\n    /* int nodelay = 1; */\n    \n    s = accept (b, (struct sockaddr *)cliaddr, len);\n\n    /* setsockopt (s, IPPROTO_TCP, TCP_NODELAY, &nodelay, sizeof(nodelay)); */\n    if (nonblock)\n        makeSocketNonBlocking(s);\n\n    return s;\n}\n\n\nevutil_socket_t\nccnet_net_bind_v4 (const char *ipaddr, int *port)\n{\n    evutil_socket_t sockfd;\n    struct sockaddr_in addr;\n    int on = 1;\n        \n    sockfd = socket (AF_INET, SOCK_STREAM, 0);\n    if (sockfd < 0) {\n        ccnet_warning(\"create socket failed: %s\\n\", strerror(errno));\n        exit(-1);\n    }\n\n    memset (&addr, 0, sizeof (struct sockaddr_in));\n    addr.sin_family = AF_INET;\n    if (inet_aton(ipaddr, &addr.sin_addr) == 0) {\n        ccnet_warning (\"Bad ip address %s\\n\", ipaddr);\n        return -1;\n    }\n    addr.sin_port = htons (*port);\n\n    if (setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, (char *)&on, sizeof(on)) < 0)\n    {\n        ccnet_warning (\"setsockopt of SO_REUSEADDR error: %s\\n\",\n                       strerror(errno));\n        return -1;\n    }\n\n    if ( bind(sockfd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {\n        ccnet_warning (\"Bind error: %s\\n\", strerror (errno));\n        return -1;\n    }\n\n\n    if (*port == 0) {\n        struct sockaddr_storage ss;\n        socklen_t len;\n\n        len = sizeof(ss);\n        if (getsockname(sockfd, (struct sockaddr *)&ss, &len) < 0) {\n            ccnet_warning (\"getsockname error: %s\\n\", strerror(errno));\n            return -1;\n        }\n        *port = sock_port ((struct sockaddr *)&ss);\n    }\n\n    return sockfd;\n}\n\n\n\nchar *\nsock_ntop(const struct sockaddr *sa, socklen_t salen)\n{\n    static char str[128];       /* Unix domain is largest */\n\n    switch (sa->sa_family) {\n    case AF_INET: {\n        struct 
sockaddr_in  *sin = (struct sockaddr_in *) sa;\n\n        if (evutil_inet_ntop(AF_INET, &sin->sin_addr, str, sizeof(str)) == NULL)\n            return(NULL);\n        return(str);\n    }\n\n#ifdef  IPv6\n    case AF_INET6: {\n        struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) sa;\n\n        if (evutil_inet_ntop(AF_INET6, &sin6->sin6_addr, str, sizeof(str) - 1) == NULL)\n            return(NULL);\n        return (str);\n    }\n#endif\n\n#ifndef WIN32\n#ifdef  AF_UNIX \n    case AF_UNIX: {\n        struct sockaddr_un  *unp = (struct sockaddr_un *) sa;\n\n            /* OK to have no pathname bound to the socket: happens on\n               every connect() unless client calls bind() first. */\n        if (unp->sun_path[0] == 0)\n            strcpy(str, \"(no pathname bound)\");\n        else\n            snprintf(str, sizeof(str), \"%s\", unp->sun_path);\n        return(str);\n    }\n#endif\n#endif\n\n    default:\n        snprintf(str, sizeof(str), \"sock_ntop: unknown AF_xxx: %d, len %d\",\n                 sa->sa_family, salen);\n        return(str);\n    }\n    return (NULL);\n}\n\nint\nsock_pton (const char *addr_str, uint16_t port, struct sockaddr_storage *sa)\n{\n    struct sockaddr_in  *saddr  = (struct sockaddr_in *) sa;\n\n#ifndef WIN32\n    struct sockaddr_in6 *saddr6 = (struct sockaddr_in6 *) sa;\n#endif\n\n    if (evutil_inet_pton (AF_INET, addr_str, &saddr->sin_addr) == 1 ) {\n        saddr->sin_family = AF_INET;\n        saddr->sin_port = htons (port);\n        return 0;\n    } \n#ifndef WIN32\n    else if (evutil_inet_pton (AF_INET6, addr_str, &saddr6->sin6_addr) == 1)\n    {\n        saddr6->sin6_family = AF_INET6;\n        saddr6->sin6_port = htons (port);\n        return 0;\n    }\n#endif\n\n    return -1;\n}\n\n/* return 1 if addr_str is a valid ipv4 or ipv6 address */\nint\nis_valid_ipaddr (const char *addr_str)\n{\n    struct sockaddr_storage addr;\n    if (!addr_str)\n        return 0;\n    if (sock_pton(addr_str, 0, &addr) < 0)\n        
return 0;\n    return 1;\n}\n\nuint16_t\nsock_port (const struct sockaddr *sa)\n{\n    switch (sa->sa_family) {\n    case AF_INET: {\n        struct sockaddr_in  *sin = (struct sockaddr_in *) sa;\n        return ntohs(sin->sin_port);\n    }\n#ifdef  IPv6\n    case AF_INET6: {\n        struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) sa;\n\n        return ntohs(sin6->sin6_port);\n    }\n#endif\n    default:\n        return 0;\n    }\n    return 0;\n}\n\n\nevutil_socket_t\nudp_client (const char *host, const char *serv,\n            struct sockaddr **saptr, socklen_t *lenp)\n{\n\tevutil_socket_t sockfd;\n    int n;\n\tstruct addrinfo\thints, *res, *ressave;\n\n\tmemset (&hints, 0, sizeof(struct addrinfo));\n\thints.ai_family = AF_UNSPEC;\n\thints.ai_socktype = SOCK_DGRAM;\n\n\tif ((n = getaddrinfo(host, serv, &hints, &res)) != 0) {\n        ccnet_warning (\"udp_client error for %s, %s: %s\",\n                       host, serv, gai_strerror(n));\n        return -1;\n    }\n\tressave = res;\n\n\tdo {\n\t\tsockfd = socket(res->ai_family, res->ai_socktype, res->ai_protocol);\n\t\tif (sockfd >= 0)\n\t\t\tbreak;\t\t/* success */\n\t} while ( (res = res->ai_next) != NULL);\n\n\tif (res == NULL) {\t/* errno set from final socket() */\n\t\tccnet_warning (\"udp_client error for %s, %s\", host, serv);\n        freeaddrinfo (ressave);\n        return -1;\n    }\n\n\t*saptr = malloc(res->ai_addrlen);\n\tmemcpy(*saptr, res->ai_addr, res->ai_addrlen);\n\t*lenp = res->ai_addrlen;\n\n\tfreeaddrinfo(ressave);\n\n\treturn (sockfd);\n}\n\n\nint\nfamily_to_level(int family)\n{\n\tswitch (family) {\n\tcase AF_INET:\n\t\treturn IPPROTO_IP;\n#ifdef\tIPV6\n\tcase AF_INET6:\n\t\treturn IPPROTO_IPV6;\n#endif\n\tdefault:\n\t\treturn -1;\n\t}\n}\n\n#ifdef WIN32\nstatic int\nmcast_join(evutil_socket_t sockfd, const struct sockaddr *grp, socklen_t grplen,\n\t\t   const char *ifname, u_int ifindex)\n{\n    int optval = 3;\n    int sockm;\n    if (setsockopt(sockfd, IPPROTO_IP, 
IP_MULTICAST_TTL,\n                  (char *)&optval, sizeof(int)) == SOCKET_ERROR) {\n        ccnet_warning(\"Fail to set socket multicast TTL, LastError=%d\\n\",\n                      WSAGetLastError());\n        return -1;\n    }\n    optval = 0;\n    if (setsockopt(sockfd, IPPROTO_IP, IP_MULTICAST_LOOP,\n                  (char *)&optval, sizeof(int)) == SOCKET_ERROR) {\n        ccnet_warning(\"Fail to set socket multicast LOOP, LastError=%d\\n\",\n                      WSAGetLastError());\n        return -1;\n    }\n    sockm = WSAJoinLeaf (sockfd, grp, grplen, NULL, NULL, NULL, NULL, JL_BOTH);\n    if (sockm == INVALID_SOCKET) {\n        ccnet_warning(\"Fail to join multicast group, LastError=%d\\n\",\n                      WSAGetLastError());\n        return -1;\n    }\n    return sockm;\n}\n\nevutil_socket_t\ncreate_multicast_sock (struct sockaddr *sasend, socklen_t salen)\n{\n    int                 ret;\n    const int           on = 1;\n    evutil_socket_t     recvfd;\n    struct sockaddr    *sarecv;\n\n    recvfd = WSASocket (AF_INET, SOCK_DGRAM, 0, NULL, 0,\n                        WSA_FLAG_MULTIPOINT_C_LEAF|WSA_FLAG_MULTIPOINT_D_LEAF\n                        |WSA_FLAG_OVERLAPPED);\n    if (recvfd < 0) {\n        ccnet_warning (\"Create multicast listen socket fails: %d\\n\",\n                       WSAGetLastError());\n        return -1;\n    }\n    ret = setsockopt(recvfd, SOL_SOCKET, SO_REUSEADDR, (char *)&on, sizeof(on));\n    if (ret != 0) {\n        ccnet_warning(\"Failed to setsockopt SO_REUSEADDR, WSAGetLastError=%d\\n\",\n                      WSAGetLastError());\n        return -1;\n    }\n\n    sarecv = malloc(salen);\n    memcpy(sarecv, sasend, salen);\n    struct sockaddr_in *saddr = (struct sockaddr_in *)sarecv;\n    saddr->sin_addr.s_addr = INADDR_ANY;\n\n    if (bind(recvfd, sarecv, salen) < 0) {\n        ccnet_warning(\"Bind multicast bind socket failed LastError=%d\\n\",\n                      WSAGetLastError());\n        free 
(sarecv);\n        return -1;;\n    }\n    free (sarecv);\n\n    if (mcast_join(recvfd, sasend, salen, NULL, 0) < 0) {\n        ccnet_warning (\"mcast_join error: %s\\n\", strerror(errno));\n        return -1;\n    }\n\n    return recvfd;\n}\n#else\nstatic int\nmcast_join(evutil_socket_t sockfd, const struct sockaddr *grp, socklen_t grplen,\n\t\t   const char *ifname, u_int ifindex)\n{\n#if (defined MCAST_JOIN_GROUP) && (! defined __APPLE__)\n\tstruct group_req req;\n\tif (ifindex > 0) {\n\t\treq.gr_interface = ifindex;\n\t} else if (ifname != NULL) {\n\t\tif ( (req.gr_interface = if_nametoindex(ifname)) == 0) {\n\t\t\terrno = ENXIO;\t/* i/f name not found */\n\t\t\treturn(-1);\n\t\t}\n\t} else\n\t\treq.gr_interface = 0;\n\tif (grplen > sizeof(req.gr_group)) {\n\t\terrno = EINVAL;\n\t\treturn -1;\n\t}\n\tmemcpy(&req.gr_group, grp, grplen);\n\treturn (setsockopt(sockfd, family_to_level(grp->sa_family),\n\t\t\tMCAST_JOIN_GROUP, &req, sizeof(req)));\n#else\n/* end mcast_join1 */\n\n/* include mcast_join2 */\n\tswitch (grp->sa_family) {\n\tcase AF_INET: {\n\t\tstruct ip_mreq\t\tmreq;\n\t\tstruct ifreq\t\tifreq;\n\n\t\tmemcpy(&mreq.imr_multiaddr.s_addr,\n\t\t\t   &((const struct sockaddr_in *) grp)->sin_addr,\n\t\t\t   sizeof(struct in_addr));\n\n\t\tif (ifindex > 0) {\n\t\t\tif (if_indextoname(ifindex, ifreq.ifr_name) == NULL) {\n\t\t\t\terrno = ENXIO;\t/* i/f index not found */\n\t\t\t\treturn(-1);\n\t\t\t}\n\t\t\tgoto doioctl;\n\t\t} else if (ifname != NULL) {\n\t\t\tstrncpy(ifreq.ifr_name, ifname, IFNAMSIZ);\ndoioctl:\n\t\t\tif (ioctl(sockfd, SIOCGIFADDR, &ifreq) < 0)\n\t\t\t\treturn(-1);\n\t\t\tmemcpy(&mreq.imr_interface,\n\t\t\t\t   &((struct sockaddr_in *) &ifreq.ifr_addr)->sin_addr,\n\t\t\t\t   sizeof(struct in_addr));\n\t\t} else\n\t\t\tmreq.imr_interface.s_addr = htonl(INADDR_ANY);\n\n\t\treturn(setsockopt(sockfd, IPPROTO_IP, IP_ADD_MEMBERSHIP,\n\t\t\t\t\t\t  &mreq, sizeof(mreq)));\n\t}\n/* end mcast_join2 */\n\n/* include mcast_join3 
*/\n#ifdef\tIPV6\n#ifndef\tIPV6_JOIN_GROUP\t\t/* APIv0 compatibility */\n#define\tIPV6_JOIN_GROUP\t\tIPV6_ADD_MEMBERSHIP\n#endif\n\tcase AF_INET6: {\n\t\tstruct ipv6_mreq\tmreq6;\n\n\t\tmemcpy(&mreq6.ipv6mr_multiaddr,\n\t\t\t   &((const struct sockaddr_in6 *) grp)->sin6_addr,\n\t\t\t   sizeof(struct in6_addr));\n\n\t\tif (ifindex > 0) {\n\t\t\tmreq6.ipv6mr_interface = ifindex;\n\t\t} else if (ifname != NULL) {\n\t\t\tif ( (mreq6.ipv6mr_interface = if_nametoindex(ifname)) == 0) {\n\t\t\t\terrno = ENXIO;\t/* i/f name not found */\n\t\t\t\treturn(-1);\n\t\t\t}\n\t\t} else\n\t\t\tmreq6.ipv6mr_interface = 0;\n\n\t\treturn(setsockopt(sockfd, IPPROTO_IPV6, IPV6_JOIN_GROUP,\n\t\t\t\t\t\t  &mreq6, sizeof(mreq6)));\n\t}\n#endif\n\n\tdefault:\n\t\terrno = EAFNOSUPPORT;\n\t\treturn(-1);\n\t}\n#endif\n\n    return -1;\n}\n\nevutil_socket_t\ncreate_multicast_sock (struct sockaddr *sasend, socklen_t salen)\n{\n    int                 ret;\n    const int           on = 1;\n    evutil_socket_t     recvfd;\n    struct sockaddr    *sarecv;\n\n    if ( (recvfd = socket (sasend->sa_family, SOCK_DGRAM, 0)) < 0) {\n        ccnet_warning (\"Create multicast listen socket fails: %s\\n\",\n                     strerror(errno));\n        return -1;\n    }\n    ret = setsockopt(recvfd, SOL_SOCKET, SO_REUSEADDR, (char *)&on, sizeof(on));\n    if (ret < 0)\n        ccnet_warning(\"Failed to setsockopt SO_REUSEADDR\\n\");\n    sarecv = malloc(salen);\n    memcpy(sarecv, sasend, salen);\n\n    if (bind(recvfd, sarecv, salen) < 0) {\n        ccnet_warning (\"Bind multicast listen socket fails: %s\\n\",\n                       strerror(errno));\n        free (sarecv);\n        return -1;\n    }\n    free (sarecv);\n\n    if (mcast_join(recvfd, sasend, salen, NULL, 0) < 0) {\n        ccnet_warning (\"mcast_join error: %s\\n\", strerror(errno));\n        return -1;\n    }\n\n    return recvfd;\n}\n\n#endif\n\nint\nsockfd_to_family(evutil_socket_t sockfd)\n{\n\tstruct sockaddr_storage 
ss;\n\tsocklen_t\tlen;\n\n\tlen = sizeof(ss);\n\tif (getsockname(sockfd, (struct sockaddr *) &ss, &len) < 0)\n\t\treturn(-1);\n\treturn(ss.ss_family);\n}\n\nint\nmcast_set_loop(evutil_socket_t sockfd, int onoff)\n{\n#ifndef WIN32\n\n\tswitch (sockfd_to_family(sockfd)) {\n\tcase AF_INET: {\n\t\tu_char\t\tflag;\n\n\t\tflag = onoff;\n\t\treturn(setsockopt(sockfd, IPPROTO_IP, IP_MULTICAST_LOOP,\n\t\t\t\t\t\t  &flag, sizeof(flag)));\n\t}\n\n#ifdef\tIPV6\n\tcase AF_INET6: {\n\t\tu_int\t\tflag;\n\n\t\tflag = onoff;\n\t\treturn(setsockopt(sockfd, IPPROTO_IPV6, IPV6_MULTICAST_LOOP,\n\t\t\t\t\t\t  &flag, sizeof(flag)));\n\t}\n#endif\n\n\tdefault:\n\t\terrno = EAFNOSUPPORT;\n\t\treturn(-1);\n\t}\n\n#else\n    return -1;\n#endif  /* WIN32 */\n}\n"
  },
  {
    "path": "lib/net.h",
    "content": "\n#ifndef CCNET_NET_H\n#define CCNET_NET_H\n\n#ifdef WIN32\n    #include <inttypes.h>\n    #include <winsock2.h>\n    #include <ws2tcpip.h>\n    typedef int socklen_t;\n    #define UNUSED \n#else\n    #include <sys/types.h>\n    #include <sys/socket.h>\n    #include <netinet/in.h>\n    #include <arpa/inet.h>\n    #include <netdb.h>\n    #include <sys/un.h>\n    #include <net/if.h>\n    #include <netinet/tcp.h>\n#endif\n\n#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)\n#include <event2/util.h>\n#else\n#include <evutil.h>\n#endif\n\n#ifdef WIN32\n    /* #define ECONNREFUSED WSAECONNREFUSED */\n    /* #define ECONNRESET   WSAECONNRESET */\n    /* #define EHOSTUNREACH WSAEHOSTUNREACH */\n    /* #define EINPROGRESS  WSAEINPROGRESS */\n    /* #define ENOTCONN     WSAENOTCONN */\n    /* #define EWOULDBLOCK  WSAEWOULDBLOCK */\n    #define sockerrno WSAGetLastError( )\n#else\n    #include <errno.h>\n    #define sockerrno errno\n#endif\n\n#ifdef WIN32\nextern int inet_aton(const char *string, struct in_addr *addr);\nextern const char *inet_ntop(int af, const void *src, char *dst, size_t size);\nextern int inet_pton(int af, const char *src, void *dst);\n#endif\n\nevutil_socket_t ccnet_net_open_tcp (const struct sockaddr *sa, int nonblock);\nevutil_socket_t ccnet_net_bind_tcp (int port, int nonblock);\nevutil_socket_t ccnet_net_accept (evutil_socket_t b, \n                                  struct sockaddr_storage *cliaddr,\n                                  socklen_t *len, int nonblock);\n\nint ccnet_net_make_socket_blocking (evutil_socket_t fd);\n\n/* bind to an IPv4 address, if (*port == 0) the port number will be returned */\nevutil_socket_t ccnet_net_bind_v4 (const char *ipaddr, int *port);\n\nint  ccnet_netSetTOS   ( evutil_socket_t s, int tos );\n\nchar *sock_ntop(const struct sockaddr *sa, socklen_t salen);\nuint16_t sock_port (const struct sockaddr *sa);\n\n/* return 1 if addr_str is a valid ipv4 or ipv6 address */\nint 
is_valid_ipaddr (const char *addr_str);\n\n\n/* return 0 if success, -1 if error */\nint sock_pton (const char *addr_str, uint16_t port, \n               struct sockaddr_storage *sa);\n\nevutil_socket_t udp_client (const char *host, const char *serv,\n                struct sockaddr **saptr, socklen_t *lenp);\n\nint mcast_set_loop(evutil_socket_t sockfd, int onoff);\n\nevutil_socket_t create_multicast_sock (struct sockaddr *sasend, socklen_t salen);\n\n#endif\n"
  },
  {
    "path": "lib/repo.vala",
    "content": "namespace Seafile {\n\npublic class Repo : Object {\n\n    // Section 1: Basic information\n    // Members in this section should be set for every Repo object\n\n    // _id is for fast access from c code. id is for\n    // vala to automatically generate a property. Note,\n    // if a Vala property is start with _, it is not\n    // translated into a GObject property.\n\n    // Due to performance reasons, 'desc', 'magic', 'enc_version', 'root', 'repaired', 'random_key'\n    // are no longer returned in listing repos API.\n\n    public char _id[37];\n    public string id {\n        get { return (string)_id; }\n        set { Posix.memcpy(_id, value, 36); _id[36] = '\\0'; }\n    }\n\n    public string _name;\n    public string name {\n        get { return _name; }\n        set { _name = value; }\n    }\n\n    public string _desc;        // description\n    public string desc {\n        get { return _desc; }\n        set { _desc = value; }\n    }\n\n    // data format version\n    public int version { get; set; }\n\n    public int64  last_modify { get; set; }\n    public int64  size { get; set; }\n    public int64  file_count { get; set; }\n    public string last_modifier { get; set; }\n    public string head_cmmt_id { get; set; }\n    public string root { get; set; }\n    public int    status { get; set; }\n    public string repo_type { get; set; }\n\n    // To be compatible with obsoleted SharedRepo object\n    public string repo_id { get; set; }\n    public string repo_name { get; set; }\n    public string repo_desc { get; set; }\n    public int64 last_modified { get; set; }\n\n    // Section 2: Encryption related\n    // Members in this section should be set for every Repo object\n\n    public bool encrypted { get; set; }\n    public string magic { get; set; }\n    public int enc_version { get; set; }\n    public string random_key { get; set; }\n    public string salt { get; set; }\n    public string pwd_hash { get; set; }\n    public string 
pwd_hash_algo { get; set; }\n    public string pwd_hash_params { get; set; }\n\n    // Section 3: Client only information\n    // Should be set for all client repo objects\n\n    public string _worktree;\n    public string worktree {\n        get { return _worktree; }\n        set { _worktree = value; }\n    }\n    public string _relay_id;\n    public string relay_id {\n        get { return _relay_id; }\n        set { _relay_id = value; }\n    }\n    public int  last_sync_time { get; set; }\n    public bool auto_sync { get; set; }\n    public bool worktree_invalid { get; set; }\n\n    // Section 4: Server only information\n    // Should be set for all server repo objects\n\n    // virutal repo related\n    public bool is_virtual { get; set; }\n    public string origin_repo_id { get; set; }\n    public string origin_repo_name { get; set; }\n    public string origin_path { get; set; }\n    public bool is_original_owner { get; set; }\n    public string virtual_perm { get; set; }\n\n    // Used to access fs objects\n    public string store_id { get; set; }\n\n    public bool is_corrupted { get; set; }\n    public bool repaired { get; set; }\n\n    // Section 5: Share information\n    // Only set in list_share_repos, get_group_repos and get_inner_pub_repos, etc\n\n    public string share_type { get; set; } // personal, group or public\n    public string permission { get; set; }\n    public string user { get; set; } // share from or share to\n    public int group_id { get; set; } // used when shared to group\n    public string group_name { get; set; } // used when shared to group\n\n    // For list_owned_repo\n    public bool is_shared { get; set; }\n}\n\npublic class TrashRepo : Object {\n\n    public string repo_id { get; set; }\n    public string repo_name { get; set; }\n    public string head_id { get; set; }\n    public string owner_id { get; set; }\n    public int64 size { get; set; }\n    public int64 del_time { get; set; }\n    public bool encrypted { get; set; 
}\n}\n\npublic class SyncInfo : Object {\n\n    public string repo_id { get; set; }\n    public string head_commit { get; set; }\n    public bool deleted_on_relay { get; set; }\n    public bool bad_local_branch { get; set; }\n    public bool need_fetch { get; set; }\n    public bool need_upload { get; set; }\n    public bool need_merge { get; set; }\n    // public int last_sync_time { get; set; }\n}\n\npublic class SyncTask : Object {\n\n    public bool is_sync_lan { get; set; }\n    public bool force_upload { get; set; }\n    public string dest_id { get; set; }\n    public string repo_id { get; set; }\n    public string state { get; set; }\n    public string error { get; set; }\n    public string tx_id { get; set; }\n}\n\npublic class SessionInfo : Object {\n    public string datadir { get; set; }\n}\n\npublic class CheckoutTask : Object {\n\n    public string repo_id { get; set; }\n    public string worktree { get; set; }\n    public int total_files { get; set; }\n    public int finished_files { get; set; }\n}\n\npublic class DiffEntry : Object {\n\n    public string status { get; set; }\n    public string name { get; set; }\n    public string new_name { get; set; }\n}\n\npublic class DeletedEntry : Object {\n\n    public string commit_id { get; set; }\n    public string obj_id { get; set; }\n    public string obj_name { get; set; }\n    public string basedir { get; set; }\n    public int mode { get; set; }\n    public int delete_time { get; set; }\n    public int64 file_size { get; set; }\n    public string scan_stat { get; set; }\n}\n\npublic class RepoTokenInfo: Object {\n    public string repo_id { get; set; }\n    public string repo_name { get; set; }\n    public string repo_owner { get; set; }\n    public string email { get; set; }\n    public string token { get; set; }\n\n    public string peer_id { get; set; }\n    public string peer_ip { get; set; }\n    public string peer_name { get; set; }\n    public int64 sync_time { get; set; }\n    public string 
client_ver { get; set; }\n}\n\npublic class SharedUser : Object {\n    public string repo_id { get; set; }\n    public string user { get; set; }\n    public string perm { get; set; }\n}\n\npublic class SharedGroup : Object {\n    public string repo_id { get; set; }\n    public int group_id { get; set; }\n    public string perm { get; set; }\n}\n\npublic class EncryptionInfo: Object {\n    public string repo_id { get; set; }\n    public string passwd { get; set; }\n    public int enc_version { get; set; }\n    public string magic { get; set; }\n    public string random_key { get; set; }\n    public string salt { get; set; }\n    public string pwd_hash { get; set; }\n    public string pwd_hash_algo { get; set; }\n    public string pwd_hash_params { get; set; }\n}\n\npublic class UserQuotaUsage: Object {\n    public string user { get; set; }\n    public int64 usage { get; set; }\n}\n\n} // namespace\n"
  },
  {
    "path": "lib/rpc_table.py",
    "content": "\"\"\"\nDefine RPC functions needed to generate\n\"\"\"\n\n# [ <ret-type>, [<arg_types>] ]\nfunc_table = [\n    [ \"int\", [] ],\n    [ \"int\", [\"int\"] ],\n    [ \"int\", [\"int\", \"int\"] ],\n    [ \"int\", [\"int\", \"string\"] ],\n    [ \"int\", [\"int\", \"string\", \"int\"] ],\n    [ \"int\", [\"int\", \"string\", \"string\"] ],\n    [ \"int\", [\"int\", \"string\", \"int\", \"int\"] ],    \n    [ \"int\", [\"int\", \"int\", \"string\", \"string\"] ],\n    [ \"int\", [\"int\", \"string\", \"string\", \"int\"] ],\n    [ \"int\", [\"string\"] ],\n    [ \"int\", [\"string\", \"int\"] ],\n    [ \"int\", [\"string\", \"int\", \"int\"] ],\n    [ \"int\", [\"string\", \"int\", \"string\"] ],\n    [ \"int\", [\"string\", \"int\", \"string\", \"string\"] ],\n    [ \"int\", [\"string\", \"int\", \"int\", \"string\", \"string\"] ],\n    [ \"int\", [\"string\", \"string\"] ],\n    [ \"int\", [\"string\", \"string\", \"int\"] ],\n    [ \"int\", [\"string\", \"string\", \"int64\"] ],\n    [ \"int\", [\"string\", \"string\", \"string\"] ],\n    [ \"int\", [\"string\", \"string\", \"int\", \"int\"] ],\n    [ \"int\", [\"string\", \"string\", \"string\", \"int\"] ],\n    [ \"int\", [\"string\", \"string\", \"string\", \"int\", \"string\"] ],\n    [ \"int\", [\"string\", \"string\", \"string\", \"string\"] ],\n    [ \"int\", [\"string\", \"string\", \"string\", \"string\", \"string\"] ],\n    [ \"int\", [\"string\", \"int\", \"string\", \"int\", \"int\"] ],\n    [ \"int\", [\"string\", \"string\", \"string\", \"string\", \"string\", \"string\"] ],\n    [ \"int\", [\"string\", \"string\", \"string\", \"int\", \"string\", \"string\"] ],\n    [ \"int\", [\"string\", \"string\", \"string\", \"string\", \"string\", \"string\", \"string\"] ],\n    [ \"int\", [\"string\", \"int64\"]],\n    [ \"int\", [\"int\", \"int64\"]],\n    [ \"int\", [\"int\", \"string\", \"int64\"]],\n    [ \"int64\", [] ],\n    [ \"int64\", [\"string\"] ],\n    [ \"int64\", [\"int\"]],\n    
[ \"int64\", [\"int\", \"string\"]],\n    [ \"int64\", [\"string\", \"string\"]],\n    [ \"int64\", [\"string\", \"int\", \"string\"] ],\n    [ \"string\", [] ],\n    [ \"string\", [\"int\"] ],\n    [ \"string\", [\"int\", \"int\"] ],\n    [ \"string\", [\"int\", \"string\"] ],\n    [ \"string\", [\"int\", \"int\", \"string\"] ],\n    [ \"string\", [\"string\"] ],\n    [ \"string\", [\"string\", \"int\"] ],\n    [ \"string\", [\"string\", \"int\", \"int\"] ],\n    [ \"string\", [\"string\", \"string\"] ],\n    [ \"string\", [\"string\", \"string\", \"int\"] ],\n    [ \"string\", [\"string\", \"string\", \"int\", \"int\"] ],\n    [ \"string\", [\"string\", \"string\", \"string\"] ],\n    [ \"string\", [\"string\", \"string\", \"string\", \"int\"] ],\n    [ \"string\", [\"string\", \"string\", \"string\", \"string\"] ],\n    [ \"string\", [\"string\", \"string\", \"string\", \"string\", \"int\"] ],\n    [ \"string\", [\"string\", \"string\", \"string\", \"string\", \"int\", \"string\", \"string\"] ],\n    [ \"string\", [\"string\", \"string\", \"string\", \"string\", \"string\"] ],\n    [ \"string\", [\"string\", \"string\", \"string\", \"string\", \"string\", \"int\"] ],\n    [ \"string\", [\"string\", \"string\", \"string\", \"int\", \"string\", \"string\"] ],\n    [ \"string\", [\"string\", \"string\", \"string\", \"string\", \"string\", \"string\", \"int\"] ],\n    [ \"string\", [\"string\", \"string\", \"string\", \"string\", \"string\", \"string\", \"int\", \"int\"] ],\n    [ \"string\", [\"string\", \"string\", \"string\", \"string\", \"string\", \"string\"] ],\n    [ \"string\", [\"string\", \"string\", \"string\", \"string\", \"string\", \"string\", \"int64\"] ],\n    [ \"string\", [\"string\", \"string\", \"string\", \"string\", \"string\", \"string\", \"int64\", \"int\"] ],\n    [ \"string\", [\"string\", \"string\", \"string\", \"string\", \"string\", \"string\", \"string\"] ],\n    [ \"string\", [\"string\", \"string\", \"string\", \"string\", 
\"string\", \"string\", \"string\", \"int\"] ],\n    [ \"string\", [\"string\", \"string\", \"string\", \"string\", \"string\", \"string\", \"string\", \"int64\"] ],\n    [ \"string\", [\"string\", \"string\", \"string\", \"string\", \"string\", \"string\", \"string\", \"string\", \"string\"] ],\n    [ \"string\", [\"string\", \"string\", \"string\", \"string\", \"string\", \"string\", \"string\", \"int\", \"string\", \"string\", \"string\"] ],\n    [ \"string\", [\"string\", \"int\", \"string\", \"string\", \"string\", \"string\", \"string\", \"string\", \"string\", \"string\", \"string\", \"string\", \"int\", \"string\"] ],\n    [ \"string\", [\"string\", \"int\", \"string\", \"int\", \"int\"] ],\n    [ \"string\", [\"string\", \"int\", \"string\", \"string\", \"string\"] ],\n    [ \"objlist\", [] ],\n    [ \"objlist\", [\"int\"] ],\n    [ \"objlist\", [\"int\", \"int\"] ],\n    [ \"objlist\", [\"int\", \"string\"] ],\n    [ \"objlist\", [\"int\", \"int\", \"int\"] ],\n    [ \"objlist\", [\"int\", \"int\", \"string\"] ],\n    [ \"objlist\", [\"int\", \"int\", \"string\", \"int\"] ],\n    [ \"objlist\", [\"string\"] ],        \n    [ \"objlist\", [\"string\", \"int\"] ],\n    [ \"objlist\", [\"string\", \"int\", \"int\"] ],\n    [ \"objlist\", [\"string\", \"int\", \"int\", \"int\"] ],\n    [ \"objlist\", [\"string\", \"int\", \"string\"] ],\n    [ \"objlist\", [\"string\", \"string\"] ],        \n    [ \"objlist\", [\"string\", \"string\", \"string\"] ],\n    [ \"objlist\", [\"string\", \"string\", \"int\"] ],\n    [ \"objlist\", [\"string\", \"string\", \"string\", \"int\"] ],\n    [ \"objlist\", [\"string\", \"string\", \"int\", \"int\"] ],\n    [ \"objlist\", [\"string\", \"int\", \"int\", \"string\"] ],\n    [ \"objlist\", [\"string\", \"string\", \"int\", \"int\", \"int\"] ],\n    [ \"objlist\", [\"string\", \"string\", \"string\", \"int\", \"int\", \"int\"] ],\n    [ \"objlist\", [\"int\", \"string\", \"string\", \"int\", \"int\"] ],\n    [ \"objlist\", 
[\"string\", \"int\", \"string\", \"string\", \"string\"] ],\n    [ \"objlist\", [\"string\", \"int\", \"string\", \"int\", \"int\"] ],\n    [ \"objlist\", [\"string\", \"int\", \"string\", \"string\", \"int\"] ],\n    [ \"objlist\", [\"string\", \"string\", \"string\", \"string\", \"int\", \"int\"] ],\n    [ \"object\", [] ],\n    [ \"object\", [\"int\"] ],\n    [ \"object\", [\"string\"] ],\n    [ \"object\", [\"string\", \"string\"] ],\n    [ \"object\", [\"string\", \"string\", \"string\"] ],\n    [ \"object\", [\"string\", \"int\", \"string\"] ],\n    [ \"object\", [\"int\", \"string\", \"string\"] ],\n    [ \"object\", [\"int\", \"string\", \"string\", \"string\", \"string\"] ],\n    [ \"object\", [\"string\", \"string\", \"int\", \"int\"] ],\n    [ \"object\", [\"string\", \"string\", \"string\", \"int\"] ],\n    [ \"object\", [\"string\", \"string\", \"string\", \"string\", \"string\", \"string\", \"string\", \"int\", \"int\"] ],\n    [ \"object\", [\"string\", \"string\", \"string\", \"string\", \"string\", \"string\", \"int\", \"string\", \"int\", \"int\"] ],\n    [\"json\", [\"string\"]],\n]\n"
  },
  {
    "path": "lib/seahub.vala",
    "content": "namespace Seafile {\n\npublic class ShareLinkInfo : Object {\n       public string repo_id { set; get; }\n       public string file_path { set; get; }\n       public string parent_dir { set; get; }\n       public string share_type { set; get; }\n}\n\n}\n"
  },
  {
    "path": "lib/search-result.vala",
    "content": "// compile this file with `valac --pkg posix repo.vala -C -H repo.h`\n\nnamespace Seafile {\n\npublic class SearchResult: Object {\n\n    public string _path;\n    public string path {\n        get { return _path; }\n        set { _path = value; }\n    }\n\n    public int64  size { get; set; }\n    public int64  mtime { get; set; }\n    public bool is_dir { set; get; }\n}\n\n} // namespace\n"
  },
  {
    "path": "lib/task.vala",
    "content": "namespace Seafile {\n\npublic class Task : Object {\n\n\tpublic char _tx_id[37];\n\tpublic string tx_id {\n\t\tget { return (string)_tx_id; }\n\t\tset { Posix.memcpy(_tx_id, value, 36); _tx_id[36] = '\\0'; }\n\t}\n\n    public string ttype { get; set; }\n\n\tpublic string repo_id { get; set; }\n\n\tpublic string dest_id { get; set; }\n\tpublic string from_branch { get; set; }\n\n\tpublic string to_branch { get; set; }\n\n\tpublic string state { get; set; }\n\n\tpublic string rt_state { get; set; }\n\n\tpublic string error_str { get; set; }\n\n    public int block_total { get; set; }\n    public int block_done { get; set; } // the number of blocks sent or received\n\n    public int fs_objects_total { get; set; }\n    public int fs_objects_done { get; set; }\n\n\tpublic int rate { get; set; }\n\n\tpublic int64 _rsize;\t\t// the size remain\n\tpublic int64  rsize{\n\t\tget { return _rsize; }\n\t\tset { _rsize = value; }\n\t}\n\n\tpublic int64 _dsize;\t\t// the size has done\n\tpublic int64 dsize {\n\t\tget { return _dsize; }\n\t\tset { _dsize = value; }\n\t}\n\n}\n\npublic class CloneTask : Object {\n       public string state { get; set; }\n       public string error_str { get; set; }\n       public string repo_id { get; set; }\n       public string peer_id { get; set; }\n       public string repo_name { get; set; }\n       public string worktree { get; set; }\n       public string tx_id { get; set; }\n}\n\n} // namespace\n"
  },
  {
    "path": "lib/timer.c",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)\n#include <event2/event.h>\n#include <event2/event_compat.h>\n#include <event2/event_struct.h>\n#else\n#include <event.h>\n#endif\n\n#include <sys/time.h>\n\n#include \"utils.h\"\n\n#include \"timer.h\"\n\nstruct CcnetTimer\n{\n    struct event   event;\n    struct timeval tv;\n    TimerCB        func;\n    void          *user_data;\n    uint8_t        inCallback;\n};\n\nstatic void\ntimer_callback (evutil_socket_t fd, short event, void *vtimer)\n{\n    int more;\n    struct CcnetTimer *timer = vtimer;\n\n    timer->inCallback = 1;\n    more = (*timer->func) (timer->user_data);\n    timer->inCallback = 0;\n\n    if (more)\n        evtimer_add (&timer->event, &timer->tv);\n    else\n        ccnet_timer_free (&timer);\n}\n\nvoid\nccnet_timer_free (CcnetTimer **ptimer)\n{\n    CcnetTimer *timer;\n\n    /* zero out the argument passed in */\n    g_return_if_fail (ptimer);\n\n    timer = *ptimer;\n    *ptimer = NULL;\n\n    /* destroy the timer directly or via the command queue */\n    if (timer && !timer->inCallback)\n    {\n        event_del (&timer->event);\n        g_free (timer);\n    }\n}\n\nCcnetTimer*\nccnet_timer_new (TimerCB         func,\n                 void           *user_data,\n                 uint64_t        interval_milliseconds)\n{\n    CcnetTimer *timer = g_new0 (CcnetTimer, 1);\n\n    timer->tv = timeval_from_msec (interval_milliseconds);\n    timer->func = func;\n    timer->user_data = user_data;\n\n    evtimer_set (&timer->event, timer_callback, timer);\n    evtimer_add (&timer->event, &timer->tv);\n\n    return timer;\n}\n"
  },
  {
    "path": "lib/timer.h",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#ifndef CCNET_TIMER_H\n#define CCNET_TIMER_H\n\n/* return TRUE to reschedule the timer, return FALSE to cancle the timer */\ntypedef int (*TimerCB) (void *data);\n\nstruct CcnetTimer;\n\ntypedef struct CcnetTimer CcnetTimer;\n\n/**\n * Calls timer_func(user_data) after the specified interval.\n * The timer is freed if timer_func returns zero.\n * Otherwise, it's called again after the same interval.\n */\nCcnetTimer* ccnet_timer_new (TimerCB           func,\n                             void             *user_data,\n                             uint64_t          timeout_milliseconds);\n\n/**\n * Frees a timer and sets the timer pointer to NULL.\n */\nvoid ccnet_timer_free (CcnetTimer **timer);\n\n\n#endif\n"
  },
  {
    "path": "lib/utils.c",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#include <config.h>\n\n#include \"common.h\"\n\n#ifdef WIN32\n#ifndef _WIN32_WINNT\n#define _WIN32_WINNT 0x500\n#endif\n#endif\n\n#include \"utils.h\"\n\n#ifdef WIN32\n\n#include <windows.h>\n#include <winsock2.h>\n#include <ws2tcpip.h>\n#include <Rpc.h>\n#include <shlobj.h>\n#include <psapi.h>\n\n#else\n#include <arpa/inet.h>\n#endif\n\n#ifndef WIN32\n#include <pwd.h>\n#include <uuid/uuid.h>\n#endif\n\n#include <unistd.h>\n#include <sys/types.h>\n#include <fcntl.h>\n#include <sys/stat.h>\n#include <dirent.h>\n#include <errno.h>\n#include <limits.h>\n#include <stdarg.h>\n\n#include <string.h>\n#include <openssl/sha.h>\n#include <openssl/hmac.h>\n#include <openssl/evp.h>\n#include <openssl/bio.h>\n#include <openssl/buffer.h>\n\n#include <glib.h>\n#include <glib/gstdio.h>\n#include <searpc-utils.h>\n\n#include <jansson.h>\n\n#include <utime.h>\n\n#include <zlib.h>\n\nextern int inet_pton(int af, const char *src, void *dst);\n\n\nstruct timeval\ntimeval_from_msec (uint64_t milliseconds)\n{\n    struct timeval ret;\n    const uint64_t microseconds = milliseconds * 1000;\n    ret.tv_sec  = microseconds / 1000000;\n    ret.tv_usec = microseconds % 1000000;\n    return ret;\n}\n\nvoid\nrawdata_to_hex (const unsigned char *rawdata, char *hex_str, int n_bytes)\n{\n    static const char hex[] = \"0123456789abcdef\";\n    int i;\n\n    for (i = 0; i < n_bytes; i++) {\n        unsigned int val = *rawdata++;\n        *hex_str++ = hex[val >> 4];\n        *hex_str++ = hex[val & 0xf];\n    }\n    *hex_str = '\\0';\n}\n\nstatic unsigned hexval(char c)\n{\n    if (c >= '0' && c <= '9')\n        return c - '0';\n    if (c >= 'a' && c <= 'f')\n        return c - 'a' + 10;\n    if (c >= 'A' && c <= 'F')\n        return c - 'A' + 10;\n    return ~0;\n}\n\nint\nhex_to_rawdata (const char *hex_str, unsigned char *rawdata, int n_bytes)\n{\n    int i;\n    for (i = 0; i < n_bytes; i++) {\n       
 unsigned int val = (hexval(hex_str[0]) << 4) | hexval(hex_str[1]);\n        if (val & ~0xff)\n            return -1;\n        *rawdata++ = val;\n        hex_str += 2;\n    }\n    return 0;\n}\n\nsize_t\nccnet_strlcpy (char *dest, const char *src, size_t size)\n{\n    size_t ret = strlen(src);\n\n    if (size) {\n        size_t len = (ret >= size) ? size - 1 : ret;\n        memcpy(dest, src, len);\n        dest[len] = '\\0';\n    }\n    return ret;\n}\n\n\nint\ncheckdir (const char *dir)\n{\n    SeafStat st;\n\n#ifdef WIN32\n    /* remove trailing '\\\\' */\n    char *path = g_strdup(dir);\n    char *p = (char *)path + strlen(path) - 1;\n    while (*p == '\\\\' || *p == '/') *p-- = '\\0';\n    if ((seaf_stat(dir, &st) < 0) || !S_ISDIR(st.st_mode)) {\n        g_free (path);\n        return -1;\n    }\n    g_free (path);\n    return 0;\n#else\n    if ((seaf_stat(dir, &st) < 0) || !S_ISDIR(st.st_mode))\n        return -1;\n    return 0;\n#endif\n}\n\nint\ncheckdir_with_mkdir (const char *dir)\n{\n#ifdef WIN32\n    int ret;\n    char *path = g_strdup(dir);\n    char *p = (char *)path + strlen(path) - 1;\n    while (*p == '\\\\' || *p == '/') *p-- = '\\0';\n    ret = g_mkdir_with_parents(path, 0755);\n    g_free (path);\n    return ret;\n#else\n    return g_mkdir_with_parents(dir, 0755);\n#endif\n}\n\nint\nobjstore_mkdir (const char *base)\n{\n    int ret;\n    int i, j, len;\n    static const char hex[] = \"0123456789abcdef\";\n    char subdir[SEAF_PATH_MAX];\n\n    if ( (ret = checkdir_with_mkdir(base)) < 0)\n        return ret;\n\n    len = strlen(base);\n    memcpy(subdir, base, len);\n    subdir[len] = G_DIR_SEPARATOR;\n    subdir[len+3] = '\\0';\n\n    for (i = 0; i < 16; i++) {\n        subdir[len+1] = hex[i];\n        for (j = 0; j < 16; j++) {\n            subdir[len+2] = hex[j];\n            if ( (ret = checkdir_with_mkdir(subdir)) < 0)\n                return ret;\n        }\n    }\n    return 0;\n}\n\nvoid\nobjstore_get_path (char *path, const char *base, 
const char *obj_id)\n{\n    int len;\n\n    len = strlen(base);\n    memcpy(path, base, len);\n    path[len] = G_DIR_SEPARATOR;\n    path[len+1] = obj_id[0];\n    path[len+2] = obj_id[1];\n    path[len+3] = G_DIR_SEPARATOR;\n    strcpy(path+len+4, obj_id+2);\n}\n\n#ifdef WIN32\n\n/* UNIX epoch expressed in Windows time, the unit is 100 nanoseconds.\n * See http://msdn.microsoft.com/en-us/library/ms724228\n */\n#define UNIX_EPOCH 116444736000000000ULL\n\n__time64_t\nfile_time_to_unix_time (FILETIME *ftime)\n{\n    guint64 win_time, unix_time;\n\n    win_time = (guint64)ftime->dwLowDateTime + (((guint64)ftime->dwHighDateTime)<<32);\n    unix_time = (win_time - UNIX_EPOCH)/10000000;\n\n    return (__time64_t)unix_time;\n}\n\nstatic int\nget_utc_file_time_fd (int fd, __time64_t *mtime, __time64_t *ctime)\n{\n    HANDLE handle;\n    FILETIME write_time, create_time;\n\n    handle = (HANDLE)_get_osfhandle (fd);\n    if (handle == INVALID_HANDLE_VALUE) {\n        g_warning (\"Failed to get handle from fd: %lu.\\n\", GetLastError());\n        return -1;\n    }\n\n    if (!GetFileTime (handle, &create_time, NULL, &write_time)) {\n        g_warning (\"Failed to get file time: %lu.\\n\", GetLastError());\n        return -1;\n    }\n\n    *mtime = file_time_to_unix_time (&write_time);\n    *ctime = file_time_to_unix_time (&create_time);\n\n    return 0;\n}\n\n#define EPOCH_DIFF 11644473600ULL\n\ninline static void\nunix_time_to_file_time (guint64 unix_time, FILETIME *ftime)\n{\n    guint64 win_time;\n\n    win_time = (unix_time + EPOCH_DIFF) * 10000000;\n    ftime->dwLowDateTime = win_time & 0xFFFFFFFF;\n    ftime->dwHighDateTime = (win_time >> 32) & 0xFFFFFFFF;\n}\n\nstatic int\nset_utc_file_time (const char *path, const wchar_t *wpath, guint64 mtime)\n{\n    HANDLE handle;\n    FILETIME write_time;\n\n    handle = CreateFileW (wpath,\n                          GENERIC_WRITE,\n                          FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,\n                  
        NULL,\n                          OPEN_EXISTING,\n                          FILE_FLAG_BACKUP_SEMANTICS,\n                          NULL);\n    if (handle == INVALID_HANDLE_VALUE) {\n        g_warning (\"Failed to open %s: %lu.\\n\", path, GetLastError());\n        return -1;\n    }\n\n    unix_time_to_file_time (mtime, &write_time);\n\n    if (!SetFileTime (handle, NULL, NULL, &write_time)) {\n        g_warning (\"Failed to set file time for %s: %lu.\\n\", path, GetLastError());\n        CloseHandle (handle);\n        return -1;\n    }\n    CloseHandle (handle);\n\n    return 0;\n}\n\nwchar_t *\nwin32_long_path (const char *path)\n{\n    char *long_path, *p;\n    wchar_t *long_path_w;\n\n    if (strncmp(path, \"//\", 2) == 0)\n        long_path = g_strconcat (\"\\\\\\\\?\\\\UNC\\\\\", path + 2, NULL);\n    else\n        long_path = g_strconcat (\"\\\\\\\\?\\\\\", path, NULL);\n    for (p = long_path; *p != 0; ++p)\n        if (*p == '/')\n            *p = '\\\\';\n\n    long_path_w = g_utf8_to_utf16 (long_path, -1, NULL, NULL, NULL);\n\n    g_free (long_path);\n    return long_path_w;\n}\n\n/* Convert a (possible) 8.3 format path to long path */\nwchar_t *\nwin32_83_path_to_long_path (const char *worktree, const wchar_t *path, int path_len)\n{\n    wchar_t *worktree_w = g_utf8_to_utf16 (worktree, -1, NULL, NULL, NULL);\n    int wt_len;\n    wchar_t *p;\n    wchar_t *fullpath_w = NULL;\n    wchar_t *fullpath_long = NULL;\n    wchar_t *ret = NULL;\n    char *fullpath;\n\n    for (p = worktree_w; *p != L'\\0'; ++p)\n        if (*p == L'/')\n            *p = L'\\\\';\n\n    wt_len = wcslen(worktree_w);\n\n    fullpath_w = g_new0 (wchar_t, wt_len + path_len + 6);\n    wcscpy (fullpath_w, L\"\\\\\\\\?\\\\\");\n    wcscat (fullpath_w, worktree_w);\n    wcscat (fullpath_w, L\"\\\\\");\n    wcsncat (fullpath_w, path, path_len);\n\n    fullpath_long = g_new0 (wchar_t, SEAF_PATH_MAX);\n\n    DWORD n = GetLongPathNameW (fullpath_w, fullpath_long, SEAF_PATH_MAX);\n    if 
(n == 0) {\n        /* Failed. */\n        fullpath = g_utf16_to_utf8 (fullpath_w, -1, NULL, NULL, NULL);\n        g_free (fullpath);\n\n        goto out;\n    } else if (n > SEAF_PATH_MAX) {\n        /* In this case n is the necessary length for the buf. */\n        g_free (fullpath_long);\n        fullpath_long = g_new0 (wchar_t, n);\n\n        if (GetLongPathNameW (fullpath_w, fullpath_long, n) != (n - 1)) {\n            fullpath = g_utf16_to_utf8 (fullpath_w, -1, NULL, NULL, NULL);\n            g_free (fullpath);\n\n            goto out;\n        }\n    }\n\n    /* Remove \"\\\\?\\worktree\\\" from the beginning. */\n    ret = wcsdup (fullpath_long + wt_len + 5);\n\nout:\n    g_free (worktree_w);\n    g_free (fullpath_w);\n    g_free (fullpath_long);\n\n    return ret;\n}\n\nstatic int\nwindows_error_to_errno (DWORD error)\n{\n    switch (error) {\n    case ERROR_FILE_NOT_FOUND:\n    case ERROR_PATH_NOT_FOUND:\n        return ENOENT;\n    case ERROR_ALREADY_EXISTS:\n        return EEXIST;\n    case ERROR_ACCESS_DENIED:\n    case ERROR_SHARING_VIOLATION:\n        return EACCES;\n    case ERROR_DIR_NOT_EMPTY:\n        return ENOTEMPTY;\n    default:\n        return 0;\n    }\n}\n\n#endif\n\nint\nseaf_stat (const char *path, SeafStat *st)\n{\n#ifdef WIN32\n    wchar_t *wpath = win32_long_path (path);\n    WIN32_FILE_ATTRIBUTE_DATA attrs;\n    int ret = 0;\n\n    if (!GetFileAttributesExW (wpath, GetFileExInfoStandard, &attrs)) {\n        ret = -1;\n        errno = windows_error_to_errno (GetLastError());\n        goto out;\n    }\n\n    memset (st, 0, sizeof(SeafStat));\n\n    if (attrs.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)\n        st->st_mode = (S_IFDIR | S_IRWXU);\n    else\n        st->st_mode = (S_IFREG | S_IRUSR | S_IWUSR);\n\n    st->st_atime = file_time_to_unix_time (&attrs.ftLastAccessTime);\n    st->st_ctime = file_time_to_unix_time (&attrs.ftCreationTime);\n    st->st_mtime = file_time_to_unix_time (&attrs.ftLastWriteTime);\n\n    st->st_size = 
((((__int64)attrs.nFileSizeHigh)<<32) + attrs.nFileSizeLow);\n\nout:\n    g_free (wpath);\n\n    return ret;\n#else\n    return stat (path, st);\n#endif\n}\n\nint\nseaf_fstat (int fd, SeafStat *st)\n{\n#ifdef WIN32\n    if (_fstat64 (fd, st) < 0)\n        return -1;\n\n    if (get_utc_file_time_fd (fd, &st->st_mtime, &st->st_ctime) < 0)\n        return -1;\n\n    return 0;\n#else\n    return fstat (fd, st);\n#endif\n}\n\n#ifdef WIN32\n\nvoid\nseaf_stat_from_find_data (WIN32_FIND_DATAW *fdata, SeafStat *st)\n{\n    memset (st, 0, sizeof(SeafStat));\n\n    if (fdata->dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)\n        st->st_mode = (S_IFDIR | S_IRWXU);\n    else\n        st->st_mode = (S_IFREG | S_IRUSR | S_IWUSR);\n\n    st->st_atime = file_time_to_unix_time (&fdata->ftLastAccessTime);\n    st->st_ctime = file_time_to_unix_time (&fdata->ftCreationTime);\n    st->st_mtime = file_time_to_unix_time (&fdata->ftLastWriteTime);\n\n    st->st_size = ((((__int64)fdata->nFileSizeHigh)<<32) + fdata->nFileSizeLow);\n}\n\n#endif\n\nint\nseaf_set_file_time (const char *path, guint64 mtime)\n{\n#ifndef WIN32\n    struct stat st;\n    struct utimbuf times;\n\n    if (stat (path, &st) < 0) {\n        g_warning (\"Failed to stat %s: %s.\\n\", path, strerror(errno));\n        return -1;\n    }\n\n    times.actime = st.st_atime;\n    times.modtime = (time_t)mtime;\n\n    return utime (path, &times);\n#else\n    wchar_t *wpath = win32_long_path (path);\n    int ret = 0;\n\n    if (set_utc_file_time (path, wpath, mtime) < 0)\n        ret = -1;\n\n    g_free (wpath);\n    return ret;\n#endif\n}\n\nint\nseaf_util_unlink (const char *path)\n{\n#ifdef WIN32\n    wchar_t *wpath = win32_long_path (path);\n    int ret = 0;\n\n    if (!DeleteFileW (wpath)) {\n        ret = -1;\n        errno = windows_error_to_errno (GetLastError());\n    }\n\n    g_free (wpath);\n    return ret;\n#else\n    return unlink (path);\n#endif\n}\n\nint\nseaf_util_rmdir (const char *path)\n{\n#ifdef WIN32\n    
wchar_t *wpath = win32_long_path (path);\n    int ret = 0;\n\n    if (!RemoveDirectoryW (wpath)) {\n        ret = -1;\n        errno = windows_error_to_errno (GetLastError());\n    }\n\n    g_free (wpath);\n    return ret;\n#else\n    return rmdir (path);\n#endif\n}\n\nint\nseaf_util_mkdir (const char *path, mode_t mode)\n{\n#ifdef WIN32\n    wchar_t *wpath = win32_long_path (path);\n    int ret = 0;\n\n    if (!CreateDirectoryW (wpath, NULL)) {\n        ret = -1;\n        errno = windows_error_to_errno (GetLastError());\n    }\n\n    g_free (wpath);\n    return ret;\n#else\n    return mkdir (path, mode);\n#endif\n}\n\nint\nseaf_util_open (const char *path, int flags)\n{\n#ifdef WIN32\n    wchar_t *wpath;\n    DWORD access = 0;\n    HANDLE handle;\n    int fd;\n\n    access |= GENERIC_READ;\n    if (flags & (O_WRONLY | O_RDWR))\n        access |= GENERIC_WRITE;\n\n    wpath = win32_long_path (path);\n\n    handle = CreateFileW (wpath,\n                          access,\n                          FILE_SHARE_DELETE | FILE_SHARE_READ | FILE_SHARE_WRITE,\n                          NULL,\n                          OPEN_EXISTING,\n                          0,\n                          NULL);\n    if (handle == INVALID_HANDLE_VALUE) {\n        errno = windows_error_to_errno (GetLastError());\n        g_free (wpath);\n        return -1;\n    }\n\n    fd = _open_osfhandle ((intptr_t)handle, 0);\n\n    g_free (wpath);\n    return fd;\n#else\n    return open (path, flags);\n#endif\n}\n\nint\nseaf_util_create (const char *path, int flags, mode_t mode)\n{\n#ifdef WIN32\n    wchar_t *wpath;\n    DWORD access = 0;\n    HANDLE handle;\n    int fd;\n\n    access |= GENERIC_READ;\n    if (flags & (O_WRONLY | O_RDWR))\n        access |= GENERIC_WRITE;\n\n    wpath = win32_long_path (path);\n\n    handle = CreateFileW (wpath,\n                          access,\n                          FILE_SHARE_DELETE | FILE_SHARE_READ | FILE_SHARE_WRITE,\n                          NULL,\n         
                 CREATE_ALWAYS,\n                          0,\n                          NULL);\n    if (handle == INVALID_HANDLE_VALUE) {\n        errno = windows_error_to_errno (GetLastError());\n        g_free (wpath);\n        return -1;\n    }\n\n    fd = _open_osfhandle ((intptr_t)handle, 0);\n\n    g_free (wpath);\n    return fd;\n#else\n    return open (path, flags, mode);\n#endif\n}\n\nint\nseaf_util_rename (const char *oldpath, const char *newpath)\n{\n#ifdef WIN32\n    wchar_t *oldpathw = win32_long_path (oldpath);\n    wchar_t *newpathw = win32_long_path (newpath);\n    int ret = 0;\n\n    if (!MoveFileExW (oldpathw, newpathw, MOVEFILE_REPLACE_EXISTING)) {\n        ret = -1;\n        errno = windows_error_to_errno (GetLastError());\n    }\n\n    g_free (oldpathw);\n    g_free (newpathw);\n    return ret;\n#else\n    return rename (oldpath, newpath);\n#endif\n}\n\ngboolean\nseaf_util_exists (const char *path)\n{\n#ifdef WIN32\n    wchar_t *wpath = win32_long_path (path);\n    DWORD attrs;\n    gboolean ret;\n\n    attrs = GetFileAttributesW (wpath);\n    ret = (attrs != INVALID_FILE_ATTRIBUTES);\n\n    g_free (wpath);\n    return ret;\n#else\n    return (access (path, F_OK) == 0);\n#endif\n}\n\ngint64\nseaf_util_lseek (int fd, gint64 offset, int whence)\n{\n#ifdef WIN32\n    return _lseeki64 (fd, offset, whence);\n#else\n    return lseek (fd, offset, whence);\n#endif\n}\n\n#ifdef WIN32\n\nint\ntraverse_directory_win32 (wchar_t *path_w,\n                          DirentCallback callback,\n                          void *user_data)\n{\n    WIN32_FIND_DATAW fdata;\n    HANDLE handle;\n    wchar_t *pattern;\n    char *path;\n    int path_len_w;\n    DWORD error;\n    gboolean stop;\n    int ret = 0;\n\n    path = g_utf16_to_utf8 (path_w, -1, NULL, NULL, NULL);\n\n    path_len_w = wcslen(path_w);\n\n    pattern = g_new0 (wchar_t, (path_len_w + 3));\n    wcscpy (pattern, path_w);\n    wcscat (pattern, L\"\\\\*\");\n\n    handle = FindFirstFileW (pattern, 
&fdata);\n    if (handle == INVALID_HANDLE_VALUE) {\n        g_warning (\"FindFirstFile failed %s: %lu.\\n\",\n                   path, GetLastError());\n        ret = -1;\n        goto out;\n    }\n\n    do {\n        if (wcscmp (fdata.cFileName, L\".\") == 0 ||\n            wcscmp (fdata.cFileName, L\"..\") == 0)\n            continue;\n\n        ++ret;\n\n        stop = FALSE;\n        if (callback (path_w, &fdata, user_data, &stop) < 0) {\n            ret = -1;\n            FindClose (handle);\n            goto out;\n        }\n        if (stop) {\n            FindClose (handle);\n            goto out;\n        }\n    } while (FindNextFileW (handle, &fdata) != 0);\n\n    error = GetLastError();\n    if (error != ERROR_NO_MORE_FILES) {\n        g_warning (\"FindNextFile failed %s: %lu.\\n\",\n                   path, error);\n        ret = -1;\n    }\n\n    FindClose (handle);\n\nout:\n    g_free (path);\n    g_free (pattern);\n    return ret;\n}\n\n#endif\n\nssize_t\nreadn (int fd, void *buf, size_t n)\n{\n\tsize_t\tn_left;\n\tssize_t\tn_read;\n\tchar\t*ptr;\n\n\tptr = buf;\n\tn_left = n;\n\twhile (n_left > 0) {\n        n_read = read(fd, ptr, n_left);\n\t\tif (n_read < 0) {\n\t\t\tif (errno == EINTR)\n\t\t\t\tn_read = 0;\n\t\t\telse\n\t\t\t\treturn -1;\n\t\t} else if (n_read == 0)\n\t\t\tbreak;\n\n\t\tn_left -= n_read;\n\t\tptr += n_read;\n\t}\n\treturn (n - n_left);\n}\n\nssize_t\nwriten (int fd, const void *buf, size_t n)\n{\n\tsize_t\t\tn_left;\n\tssize_t\t\tn_written;\n\tconst char\t*ptr;\n\n\tptr = buf;\n\tn_left = n;\n\twhile (n_left > 0) {\n        n_written = write(fd, ptr, n_left);\n\t\tif (n_written <= 0) {\n\t\t\tif (n_written < 0 && errno == EINTR)\n\t\t\t\tn_written = 0;\n\t\t\telse\n\t\t\t\treturn -1;\n\t\t}\n\n\t\tn_left -= n_written;\n\t\tptr += n_written;\n\t}\n\treturn n;\n}\n\n\nssize_t\nrecvn (evutil_socket_t fd, void *buf, size_t n)\n{\n\tsize_t\tn_left;\n\tssize_t\tn_read;\n\tchar\t*ptr;\n\n\tptr = buf;\n\tn_left = n;\n\twhile (n_left > 
0) {\n#ifndef WIN32\n        if ((n_read = read(fd, ptr, n_left)) < 0)\n#else\n        if ((n_read = recv(fd, ptr, n_left, 0)) < 0)\n#endif\n        {\n\t\t\tif (errno == EINTR)\n\t\t\t\tn_read = 0;\n\t\t\telse\n\t\t\t\treturn -1;\n\t\t} else if (n_read == 0)\n\t\t\tbreak;\n\n\t\tn_left -= n_read;\n\t\tptr   += n_read;\n\t}\n\treturn (n - n_left);\n}\n\nssize_t\nsendn (evutil_socket_t fd, const void *buf, size_t n)\n{\n\tsize_t\t\tn_left;\n\tssize_t\t\tn_written;\n\tconst char\t*ptr;\n\n\tptr = buf;\n\tn_left = n;\n\twhile (n_left > 0) {\n#ifndef WIN32\n        if ( (n_written = write(fd, ptr, n_left)) <= 0)\n#else\n        if ( (n_written = send(fd, ptr, n_left, 0)) <= 0)\n#endif\n        {\n\t\t\tif (n_written < 0 && errno == EINTR)\n\t\t\t\tn_written = 0;\n\t\t\telse\n\t\t\t\treturn -1;\n\t\t}\n\n\t\tn_left -= n_written;\n\t\tptr   += n_written;\n\t}\n\treturn n;\n}\n\nint copy_fd (int ifd, int ofd)\n{\n    while (1) {\n        char buffer[8192];\n        ssize_t len = readn (ifd, buffer, sizeof(buffer));\n        if (!len)\n            break;\n        if (len < 0) {\n            close (ifd);\n            return -1;\n        }\n        if (writen (ofd, buffer, len) < 0) {\n            close (ofd);\n            return -1;\n        }\n    }\n    close(ifd);\n    return 0;\n}\n\nint copy_file (const char *dst, const char *src, int mode)\n{\n    int fdi, fdo, status;\n\n    if ((fdi = g_open (src, O_RDONLY | O_BINARY, 0)) < 0)\n        return fdi;\n\n    fdo = g_open (dst, O_WRONLY | O_CREAT | O_EXCL | O_BINARY, mode);\n    if (fdo < 0 && errno == EEXIST) {\n        close (fdi);\n        return 0;\n    } else if (fdo < 0){\n        close (fdi);\n        return -1;\n    }\n\n    status = copy_fd (fdi, fdo);\n    if (close (fdo) != 0)\n        return -1;\n\n    return status;\n}\n\nchar*\nccnet_expand_path (const char *src)\n{\n    int total_len = 0;\n#ifdef WIN32\n    char new_path[SEAF_PATH_MAX + 1];\n    char *p = new_path;\n    const char *q = src;\n\n    
memset(new_path, 0, sizeof(new_path));\n    if (*src == '~') {\n        const char *home = g_get_home_dir();\n        total_len += strlen(home);\n        if (total_len > SEAF_PATH_MAX) {\n            return NULL;\n        }\n        memcpy(new_path, home, strlen(home));\n        p += strlen(new_path);\n        q++;\n    }\n    total_len += strlen(q);\n    if (total_len > SEAF_PATH_MAX) {\n        return NULL;\n    }\n    memcpy(p, q, strlen(q));\n\n    /* delete the charactor '\\' or '/' at the end of the path\n     * because the function stat faied to deal with directory names\n     * with '\\' or '/' in the end */\n    p = new_path + strlen(new_path) - 1;\n    while(*p == '\\\\' || *p == '/') *p-- = '\\0';\n\n    return strdup (new_path);\n#else\n    const char *next_in, *ntoken;\n    char new_path[SEAF_PATH_MAX + 1];\n    char *next_out;\n    int len;\n\n   /* special cases */\n    if (!src || *src == '\\0')\n        return NULL;\n    if (strlen(src) > SEAF_PATH_MAX)\n        return NULL;\n\n    next_in = src;\n    next_out = new_path;\n    *next_out = '\\0';\n\n    if (*src == '~') {\n        /* handle src start with '~' or '~<user>' like '~plt' */\n        struct passwd *pw = NULL;\n\n        for ( ; *next_in != '/' && *next_in != '\\0'; next_in++) ;\n        \n        len = next_in - src;\n        if (len == 1) {\n            pw = getpwuid (geteuid());\n        } else {\n            /* copy '~<user>' to new_path */\n            if (len > SEAF_PATH_MAX) {\n                return NULL;\n            }\n            memcpy (new_path, src, len);\n            new_path[len] = '\\0';\n            pw = getpwnam (new_path + 1);\n        }\n        if (pw == NULL)\n            return NULL;\n       \n        len = strlen (pw->pw_dir);\n        total_len += len;\n        if (total_len > SEAF_PATH_MAX) {\n            return NULL;\n        }\n        memcpy (new_path, pw->pw_dir, len);\n        next_out = new_path + len;\n        *next_out = '\\0';\n\n        if (*next_in == 
'\\0')\n            return strdup (new_path);\n    } else if (*src != '/') {\n        getcwd (new_path, SEAF_PATH_MAX);\n        for ( ; *next_out; next_out++) ; /* to '\\0' */\n    }\n    \n    while (*next_in != '\\0') {\n        /* move ntoken to the next not '/' char  */\n        for (ntoken = next_in; *ntoken == '/'; ntoken++) ;\n\n        for (next_in = ntoken; *next_in != '/' \n                 && *next_in != '\\0'; next_in++) ;\n \n        len = next_in - ntoken;\n\n        if (len == 0) {\n            /* the path ends with '/', keep it */\n            *next_out++ = '/';\n            *next_out = '\\0';\n            break;\n        }\n\n        if (len == 2 && ntoken[0] == '.' && ntoken[1] == '.') \n        {\n            /* '..' */\n            for (; next_out > new_path && *next_out != '/'; next_out--)\n                ;\n            *next_out = '\\0';\n        } else if (ntoken[0] != '.' || len != 1) {\n            /* not '.' */\n            *next_out++ = '/';\n            total_len += len;\n            if (total_len > SEAF_PATH_MAX) {\n                return NULL;\n            }\n            memcpy (next_out, ntoken, len);\n            next_out += len;\n            *next_out = '\\0';\n        }\n    }\n\n    /* the final special case */\n    if (new_path[0] == '\\0') {\n        new_path[0] = '/';\n        new_path[1] = '\\0';\n    }\n    return strdup (new_path);\n#endif\n}\n\n\nint\ncalculate_sha1 (unsigned char *sha1, const char *msg, int len)\n{\n    SHA_CTX c;\n\n    if (len < 0)\n        len = strlen(msg);\n\n    SHA1_Init(&c);\n    SHA1_Update(&c, msg, len);    \n\tSHA1_Final(sha1, &c);\n    return 0;\n}\n\nuint32_t\nccnet_sha1_hash (const void *v)\n{\n    /* 31 bit hash function */\n    const unsigned char *p = v;\n    uint32_t h = 0;\n    int i;\n\n    for (i = 0; i < 20; i++)\n        h = (h << 5) - h + p[i];\n\n    return h;\n}\n\nint\nccnet_sha1_equal (const void *v1,\n                  const void *v2)\n{\n    const unsigned char *p1 = v1;\n   
 const unsigned char *p2 = v2;\n    int i;\n\n    for (i = 0; i < 20; i++)\n        if (p1[i] != p2[i])\n            return 0;\n    \n    return 1;\n}\n\n#ifndef WIN32\nchar* gen_uuid ()\n{\n    char *uuid_str = g_malloc (37);\n    uuid_t uuid;\n\n    uuid_generate (uuid);\n    uuid_unparse_lower (uuid, uuid_str);\n\n    return uuid_str;\n}\n\nvoid gen_uuid_inplace (char *buf)\n{\n    uuid_t uuid;\n\n    uuid_generate (uuid);\n    uuid_unparse_lower (uuid, buf);\n}\n\ngboolean\nis_uuid_valid (const char *uuid_str)\n{\n    uuid_t uuid;\n\n    if (!uuid_str)\n        return FALSE;\n\n    if (uuid_parse (uuid_str, uuid) < 0)\n        return FALSE;\n    return TRUE;\n}\n\n#else\nchar* gen_uuid ()\n{\n    char *uuid_str = g_malloc (37);\n    unsigned char *str = NULL;\n    UUID uuid;\n\n    UuidCreate(&uuid);\n    UuidToString(&uuid, &str);\n    memcpy(uuid_str, str, 37);\n    RpcStringFree(&str);\n    return uuid_str;\n}\n\nvoid gen_uuid_inplace (char *buf)\n{\n    unsigned char *str = NULL;\n    UUID uuid;\n\n    UuidCreate(&uuid);\n    UuidToString(&uuid, &str);\n    memcpy(buf, str, 37);\n    RpcStringFree(&str);\n}\n\ngboolean\nis_uuid_valid (const char *uuid_str)\n{\n    if (!uuid_str)\n        return FALSE;\n\n    UUID uuid;\n    if (UuidFromString((unsigned char *)uuid_str, &uuid) != RPC_S_OK)\n        return FALSE;\n    return TRUE;\n}\n\n#endif\n\ngboolean\nis_object_id_valid (const char *obj_id)\n{\n    if (!obj_id)\n        return FALSE;\n\n    int len = strlen(obj_id);\n    int i;\n    char c;\n\n    if (len != 40)\n        return FALSE;\n\n    for (i = 0; i < len; ++i) {\n        c = obj_id[i];\n        if ((c >= '0' && c <= '9') || (c >= 'a' && c <= 'f'))\n            continue;\n        return FALSE;\n    }\n\n    return TRUE;\n}\n\nchar* strjoin_n (const char *seperator, int argc, char **argv)\n{\n    GString *buf;\n    int i;\n    char *str;\n\n    if (argc == 0)\n        return NULL;\n    \n    buf = g_string_new (argv[0]);\n    for (i = 1; i < argc; 
++i) {\n        g_string_append (buf, seperator);\n        g_string_append (buf, argv[i]);\n    }\n\n    str = buf->str;\n    g_string_free (buf, FALSE);\n    return str;\n}\n\n\ngboolean is_ipaddr_valid (const char *ip)\n{\n    unsigned char buf[sizeof(struct in6_addr)];\n\n    if (evutil_inet_pton(AF_INET, ip, buf) == 1)\n        return TRUE;\n\n    if (evutil_inet_pton(AF_INET6, ip, buf) == 1)\n        return TRUE;\n    \n    return FALSE;\n}\n\nvoid parse_key_value_pairs (char *string, KeyValueFunc func, void *data)\n{\n    char *line = string, *next, *space;\n    char *key, *value;\n\n    while (*line) {\n        /* handle empty line */\n        if (*line == '\\n') {\n            ++line;\n            continue;\n        }\n\n        for (next = line; *next != '\\n' && *next; ++next) ;\n        *next = '\\0';\n        \n        for (space = line; space < next && *space != ' '; ++space) ;\n        if (*space != ' ') {\n            g_warning (\"Bad key value format: %s\\n\", line);\n            return;\n        }\n        *space = '\\0';\n        key = line;\n        value = space + 1;\n        \n        func (data, key, value);\n\n        line = next + 1;\n    }\n}\n\nvoid parse_key_value_pairs2 (char *string, KeyValueFunc2 func, void *data)\n{\n    char *line = string, *next, *space;\n    char *key, *value;\n\n    while (*line) {\n        /* handle empty line */\n        if (*line == '\\n') {\n            ++line;\n            continue;\n        }\n\n        for (next = line; *next != '\\n' && *next; ++next) ;\n        *next = '\\0';\n        \n        for (space = line; space < next && *space != ' '; ++space) ;\n        if (*space != ' ') {\n            g_warning (\"Bad key value format: %s\\n\", line);\n            return;\n        }\n        *space = '\\0';\n        key = line;\n        value = space + 1;\n        \n        if (func(data, key, value) == FALSE)\n            break;\n\n        line = next + 1;\n    }\n}\n\n/**\n * string_list_is_exists:\n * 
@str_list: \n * @string: a C string or %NULL\n *\n * Check whether @string is in @str_list.\n *\n * returns: %TRUE if @string is in str_list, %FALSE otherwise\n */\ngboolean\nstring_list_is_exists (GList *str_list, const char *string)\n{\n    GList *ptr;\n    for (ptr = str_list; ptr; ptr = ptr->next) {\n        if (g_strcmp0(string, ptr->data) == 0)\n            return TRUE;\n    }\n    return FALSE;\n}\n\n/**\n * string_list_append:\n * @str_list: \n * @string: a C string (can't be %NULL\n *\n * Append @string to @str_list if it is in the list.\n *\n * returns: the new start of the list\n */\nGList*\nstring_list_append (GList *str_list, const char *string)\n{\n    g_return_val_if_fail (string != NULL, str_list);\n\n    if (string_list_is_exists(str_list, string))\n        return str_list;\n\n    str_list = g_list_append (str_list, g_strdup(string));\n    return str_list;\n}\n\nGList *\nstring_list_append_sorted (GList *str_list, const char *string)\n{\n    g_return_val_if_fail (string != NULL, str_list);\n\n    if (string_list_is_exists(str_list, string))\n        return str_list;\n\n    str_list = g_list_insert_sorted_with_data (str_list, g_strdup(string),\n                                 (GCompareDataFunc)g_strcmp0, NULL);\n    return str_list;\n}\n\n\nGList *\nstring_list_remove (GList *str_list, const char *string)\n{\n    g_return_val_if_fail (string != NULL, str_list);\n\n    GList *ptr;\n\n    for (ptr = str_list; ptr; ptr = ptr->next) {\n        if (strcmp((char *)ptr->data, string) == 0) {\n            g_free (ptr->data);\n            return g_list_delete_link (str_list, ptr);\n        }\n    }\n    return str_list;\n}\n\n\nvoid\nstring_list_free (GList *str_list)\n{\n    GList *ptr = str_list;\n\n    while (ptr) {\n        g_free (ptr->data);\n        ptr = ptr->next;\n    }\n\n    g_list_free (str_list);\n}\n\n\nvoid\nstring_list_join (GList *str_list, GString *str, const char *seperator)\n{\n    GList *ptr;\n    if (!str_list)\n        return;\n\n    
ptr = str_list;\n    g_string_append (str, ptr->data);\n\n    for (ptr = ptr->next; ptr; ptr = ptr->next) {\n        g_string_append (str, seperator);\n        g_string_append (str, (char *)ptr->data);\n    }\n}\n\nGList *\nstring_list_parse (const char *list_in_str, const char *seperator)\n{\n    if (!list_in_str)\n        return NULL;\n\n    GList *list = NULL;\n    char **array = g_strsplit (list_in_str, seperator, 0);\n    char **ptr;\n\n    for (ptr = array; *ptr; ptr++) {\n        list = g_list_prepend (list, g_strdup(*ptr));\n    }\n    list = g_list_reverse (list);\n    \n    g_strfreev (array);\n    return list;\n}\n\nGList *\nstring_list_parse_sorted (const char *list_in_str, const char *seperator)\n{\n    GList *list = string_list_parse (list_in_str, seperator);\n\n    return g_list_sort (list, (GCompareFunc)g_strcmp0);\n}\n\ngboolean\nstring_list_sorted_is_equal (GList *list1, GList *list2)\n{\n    GList *ptr1 = list1, *ptr2 = list2;\n\n    while (ptr1 && ptr2) {\n        if (g_strcmp0(ptr1->data, ptr2->data) != 0)\n            break;\n\n        ptr1 = ptr1->next;\n        ptr2 = ptr2->next;\n    }\n\n    if (!ptr1 && !ptr2)\n        return TRUE;\n    return FALSE;\n}\n\nchar **\nncopy_string_array (char **orig, int n)\n{\n    char **ret = g_malloc (sizeof(char *) * n);\n    int i = 0;\n\n    for (; i < n; i++)\n        ret[i] = g_strdup(orig[i]);\n    return ret;\n}\n\nvoid\nnfree_string_array (char **array, int n)\n{\n    int i = 0;\n\n    for (; i < n; i++)\n        g_free (array[i]);\n    g_free (array);\n}\n\ngint64\nget_current_time()\n{\n    return g_get_real_time();\n}\n\n#ifdef WIN32\nstatic SOCKET pg_serv_sock = INVALID_SOCKET;\nstatic struct sockaddr_in pg_serv_addr;\n\n/* pgpipe() should only be called in the main loop,\n * since it accesses the static global socket.\n */\nint\npgpipe (ccnet_pipe_t handles[2])\n{\n    int len = sizeof( pg_serv_addr );\n\n    handles[0] = handles[1] = INVALID_SOCKET;\n\n    if (pg_serv_sock == INVALID_SOCKET) 
{\n        if ((pg_serv_sock = socket(AF_INET, SOCK_STREAM, 0)) == INVALID_SOCKET) {\n            g_warning(\"pgpipe failed to create socket: %d\\n\", WSAGetLastError());\n            return -1;\n        }\n\n        memset(&pg_serv_addr, 0, sizeof(pg_serv_addr));\n        pg_serv_addr.sin_family = AF_INET;\n        pg_serv_addr.sin_port = htons(0);\n        pg_serv_addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);\n\n        if (bind(pg_serv_sock, (SOCKADDR *)&pg_serv_addr, len) == SOCKET_ERROR) {\n            g_warning(\"pgpipe failed to bind: %d\\n\", WSAGetLastError());\n            closesocket(pg_serv_sock);\n            pg_serv_sock = INVALID_SOCKET;\n            return -1;\n        }\n\n        if (listen(pg_serv_sock, SOMAXCONN) == SOCKET_ERROR) {\n            g_warning(\"pgpipe failed to listen: %d\\n\", WSAGetLastError());\n            closesocket(pg_serv_sock);\n            pg_serv_sock = INVALID_SOCKET;\n            return -1;\n        }\n\n        struct sockaddr_in tmp_addr;\n        int tmp_len = sizeof(tmp_addr);\n        if (getsockname(pg_serv_sock, (SOCKADDR *)&tmp_addr, &tmp_len) == SOCKET_ERROR) {\n            g_warning(\"pgpipe failed to getsockname: %d\\n\", WSAGetLastError());\n            closesocket(pg_serv_sock);\n            pg_serv_sock = INVALID_SOCKET;\n            return -1;\n        }\n        pg_serv_addr.sin_port = tmp_addr.sin_port;\n    }\n\n    if ((handles[1] = socket(PF_INET, SOCK_STREAM, 0)) == INVALID_SOCKET)\n    {\n        g_warning(\"pgpipe failed to create socket 2: %d\\n\", WSAGetLastError());\n        closesocket(pg_serv_sock);\n        pg_serv_sock = INVALID_SOCKET;\n        return -1;\n    }\n\n    if (connect(handles[1], (SOCKADDR *)&pg_serv_addr, len) == SOCKET_ERROR)\n    {\n        g_warning(\"pgpipe failed to connect socket: %d\\n\", WSAGetLastError());\n        closesocket(handles[1]);\n        handles[1] = INVALID_SOCKET;\n        closesocket(pg_serv_sock);\n        pg_serv_sock = INVALID_SOCKET;\n        return 
-1;\n    }\n\n    struct sockaddr_in client_addr;\n    int client_len = sizeof(client_addr);\n    if ((handles[0] = accept(pg_serv_sock, (SOCKADDR *)&client_addr, &client_len)) == INVALID_SOCKET)\n    {\n        g_warning(\"pgpipe failed to accept socket: %d\\n\", WSAGetLastError());\n        closesocket(handles[1]);\n        handles[1] = INVALID_SOCKET;\n        closesocket(pg_serv_sock);\n        pg_serv_sock = INVALID_SOCKET;\n        return -1;\n    }\n\n    return 0;\n}\n#endif\n\n/*\n  The EVP_EncryptXXX and EVP_DecryptXXX series of functions have a\n  weird choice of returned value.\n*/\n#define ENC_SUCCESS 1\n#define ENC_FAILURE 0\n#define DEC_SUCCESS 1\n#define DEC_FAILURE 0\n\n\n#include <openssl/aes.h>\n#include <openssl/evp.h>\n\n/* Block size, in bytes. For AES it can only be 16 bytes. */\n#define BLK_SIZE 16\n#define ENCRYPT_BLK_SIZE BLK_SIZE\n\n\nint\nccnet_encrypt (char **data_out,\n               int *out_len,\n               const char *data_in,\n               const int in_len,\n               const char *code,\n               const int code_len)\n{\n    *data_out = NULL;\n    *out_len = -1;\n\n    /* check validation */\n    if ( data_in == NULL || in_len <= 0 ||\n         code == NULL || code_len <= 0) {\n\n        g_warning (\"Invalid params.\\n\");\n        return -1;\n    }\n\n    EVP_CIPHER_CTX *ctx;\n    int ret, key_len;\n    unsigned char key[16], iv[16];\n    int blks;                   \n\n    \n    /* Generate the derived key. We use AES 128 bits key,\n       Electroic-Code-Book cipher mode, and SHA1 as the message digest\n       when generating the key. IV is not used in ecb mode,\n       actually. 
*/\n    key_len  = EVP_BytesToKey (EVP_aes_128_ecb(), /* cipher mode */\n                               EVP_sha1(),        /* message digest */\n                               NULL,              /* salt */\n                               (unsigned char*)code, /* passwd */\n                               code_len,\n                               3,   /* iteration times */\n                               key, /* the derived key */\n                               iv); /* IV, initial vector */\n\n    /* The key should be 16 bytes long for our 128 bit key. */\n    if (key_len != 16) {\n        g_warning (\"failed to init EVP_CIPHER_CTX.\\n\");\n        return -1;\n    }\n\n    /* Prepare CTX for encryption. */\n    ctx = EVP_CIPHER_CTX_new ();\n\n    ret = EVP_EncryptInit_ex (ctx,\n                              EVP_aes_128_ecb(), /* cipher mode */\n                              NULL, /* engine, NULL for default */\n                              key,  /* derived key */\n                              iv);  /* initial vector */\n\n    if (ret == ENC_FAILURE){\n        EVP_CIPHER_CTX_free (ctx);\n        return -1;\n    }\n    /* Allocating output buffer. */\n    \n    /*\n      For EVP symmetric encryption, padding is always used __even if__\n      data size is a multiple of block size, in which case the padding\n      length is the block size. so we have the following:\n    */\n    \n    blks = (in_len / BLK_SIZE) + 1;\n\n    *data_out = (char *)g_malloc (blks * BLK_SIZE);\n\n    if (*data_out == NULL) {\n        g_warning (\"failed to allocate the output buffer.\\n\");\n        goto enc_error;\n    }                \n\n    int update_len, final_len;\n\n    /* Do the encryption. 
*/\n    ret = EVP_EncryptUpdate (ctx,\n                             (unsigned char*)*data_out,\n                             &update_len,\n                             (unsigned char*)data_in,\n                             in_len);\n\n    if (ret == ENC_FAILURE)\n        goto enc_error;\n    \n    /* Finish the possible partial block. */\n    ret = EVP_EncryptFinal_ex (ctx,\n                               (unsigned char*)*data_out + update_len,\n                               &final_len);\n\n    *out_len = update_len + final_len;\n\n    /* out_len should be equal to the allocated buffer size. */\n    if (ret == ENC_FAILURE || *out_len != (blks * BLK_SIZE))\n        goto enc_error;\n    \n    EVP_CIPHER_CTX_free (ctx);\n\n    return 0;\n\nenc_error:\n\n    EVP_CIPHER_CTX_free (ctx);\n\n    *out_len = -1;\n\n    if (*data_out != NULL)\n        g_free (*data_out);\n\n    *data_out = NULL;\n\n    return -1;   \n}\n\nint\nccnet_decrypt (char **data_out,\n               int *out_len,\n               const char *data_in,\n               const int in_len,\n               const char *code,\n               const int code_len)\n{\n    *data_out = NULL;\n    *out_len = -1;\n\n    /* Check validation. Because padding is always used, in_len must\n     * be a multiple of BLK_SIZE */\n    if ( data_in == NULL || in_len <= 0 || in_len % BLK_SIZE != 0 ||\n         code == NULL || code_len <= 0) {\n\n        g_warning (\"Invalid param(s).\\n\");\n        return -1;\n    }\n\n    EVP_CIPHER_CTX *ctx;\n    int ret, key_len;\n    unsigned char key[16], iv[16];\n\n   \n    /* Generate the derived key. We use AES 128 bits key,\n       Electroic-Code-Book cipher mode, and SHA1 as the message digest\n       when generating the key. IV is not used in ecb mode,\n       actually. 
*/\n    key_len  = EVP_BytesToKey (EVP_aes_128_ecb(), /* cipher mode */\n                               EVP_sha1(),        /* message digest */\n                               NULL,              /* salt */\n                               (unsigned char*)code, /* passwd */\n                               code_len,\n                               3,   /* iteration times */\n                               key, /* the derived key */\n                               iv); /* IV, initial vector */\n\n    /* The key should be 16 bytes long for our 128 bit key. */\n    if (key_len != 16) {\n        g_warning (\"failed to init EVP_CIPHER_CTX.\\n\");\n        return -1;\n    }\n\n\n    /* Prepare CTX for decryption. */\n    ctx = EVP_CIPHER_CTX_new ();\n\n    ret = EVP_DecryptInit_ex (ctx,\n                              EVP_aes_128_ecb(), /* cipher mode */\n                              NULL, /* engine, NULL for default */\n                              key,  /* derived key */\n                              iv);  /* initial vector */\n\n    if (ret == DEC_FAILURE)\n        return -1;\n\n    /* Allocating output buffer. */\n    \n    *data_out = (char *)g_malloc (in_len);\n\n    if (*data_out == NULL) {\n        g_warning (\"failed to allocate the output buffer.\\n\");\n        goto dec_error;\n    }                \n\n    int update_len, final_len;\n\n    /* Do the decryption. */\n    ret = EVP_DecryptUpdate (ctx,\n                             (unsigned char*)*data_out,\n                             &update_len,\n                             (unsigned char*)data_in,\n                             in_len);\n\n    if (ret == DEC_FAILURE)\n        goto dec_error;\n\n\n    /* Finish the possible partial block. */\n    ret = EVP_DecryptFinal_ex (ctx,\n                               (unsigned char*)*data_out + update_len,\n                               &final_len);\n\n    *out_len = update_len + final_len;\n\n    /* out_len should be smaller than in_len. 
*/\n    if (ret == DEC_FAILURE || *out_len > in_len)\n        goto dec_error;\n\n    EVP_CIPHER_CTX_free (ctx);\n    \n    return 0;\n\ndec_error:\n\n    EVP_CIPHER_CTX_free (ctx);\n\n    *out_len = -1;\n    if (*data_out != NULL)\n        g_free (*data_out);\n\n    *data_out = NULL;\n\n    return -1;\n    \n}\n\n/* convert locale specific input to utf8 encoded string  */\nchar *ccnet_locale_to_utf8 (const gchar *src)\n{\n    if (!src)\n        return NULL;\n\n    gsize bytes_read = 0;\n    gsize bytes_written = 0;\n    GError *error = NULL;\n    gchar *dst = NULL;\n\n    dst = g_locale_to_utf8\n        (src,                   /* locale specific string */\n         strlen(src),           /* len of src */\n         &bytes_read,           /* length processed */\n         &bytes_written,        /* output length */\n         &error);\n\n    if (error) {\n        return NULL;\n    }\n\n    return dst;\n}\n\n/* convert utf8 input to locale specific string  */\nchar *ccnet_locale_from_utf8 (const gchar *src)\n{\n    if (!src)\n        return NULL;\n\n    gsize bytes_read = 0;\n    gsize bytes_written = 0;\n    GError *error = NULL;\n    gchar *dst = NULL;\n\n    dst = g_locale_from_utf8\n        (src,                   /* locale specific string */\n         strlen(src),           /* len of src */\n         &bytes_read,           /* length processed */\n         &bytes_written,        /* output length */\n         &error);\n\n    if (error) {\n        return NULL;\n    }\n\n    return dst;\n}\n\n#ifdef WIN32\n\nstatic HANDLE\nget_process_handle (const char *process_name_in)\n{\n    char name[256];\n    if (strstr(process_name_in, \".exe\")) {\n        snprintf (name, sizeof(name), \"%s\", process_name_in);\n    } else {\n        snprintf (name, sizeof(name), \"%s.exe\", process_name_in);\n    }\n\n    DWORD aProcesses[1024], cbNeeded, cProcesses;\n\n    if (!EnumProcesses(aProcesses, sizeof(aProcesses), &cbNeeded))\n        return NULL;\n\n    /* Calculate how many process 
identifiers were returned. */\n    cProcesses = cbNeeded / sizeof(DWORD);\n\n    HANDLE hProcess;\n    HMODULE hMod;\n    char process_name[SEAF_PATH_MAX];\n    unsigned int i;\n\n    for (i = 0; i < cProcesses; i++) {\n        if(aProcesses[i] == 0)\n            continue;\n        hProcess = OpenProcess (PROCESS_ALL_ACCESS, FALSE, aProcesses[i]);\n        if (!hProcess)\n            continue;\n            \n        if (EnumProcessModules(hProcess, &hMod, sizeof(hMod), &cbNeeded)) {\n            GetModuleBaseName(hProcess, hMod, process_name, \n                              sizeof(process_name)/sizeof(char));\n        }\n\n        if (strcasecmp(process_name, name) == 0)\n            return hProcess;\n        else {\n            CloseHandle(hProcess);\n        }\n    }\n    /* Not found */\n    return NULL;\n}\n\nint count_process (const char *process_name_in)\n{\n    char name[SEAF_PATH_MAX];\n    char process_name[SEAF_PATH_MAX];\n    DWORD aProcesses[1024], cbNeeded, cProcesses;\n    HANDLE hProcess;\n    HMODULE hMods[1024];\n    int count = 0;\n    int i, j;\n    \n    if (strstr(process_name_in, \".exe\")) {\n        snprintf (name, sizeof(name), \"%s\", process_name_in);\n    } else {\n        snprintf (name, sizeof(name), \"%s.exe\", process_name_in);\n    }\n\n    if (!EnumProcesses(aProcesses, sizeof(aProcesses), &cbNeeded)) {\n        return 0;\n    }\n\n    /* Calculate how many process identifiers were returned. 
*/\n    cProcesses = cbNeeded / sizeof(DWORD);\n\n    for (i = 0; i < cProcesses; i++) {\n        if(aProcesses[i] == 0)\n            continue;\n        hProcess = OpenProcess (PROCESS_QUERY_INFORMATION | PROCESS_VM_READ, FALSE, aProcesses[i]);\n        if (!hProcess) {\n            continue;\n        }\n            \n        if (EnumProcessModules(hProcess, hMods, sizeof(hMods), &cbNeeded)) {\n            for (j = 0; j < cbNeeded / sizeof(HMODULE); j++) {\n                if (GetModuleBaseName(hProcess, hMods[j], process_name,\n                                      sizeof(process_name))) {\n                    if (strcasecmp(process_name, name) == 0)\n                        count++;\n                }\n            } \n        }\n\n        CloseHandle(hProcess);\n    }\n    \n    return count;\n}\n\ngboolean\nprocess_is_running (const char *process_name)\n{\n    HANDLE proc_handle = get_process_handle(process_name);\n\n    if (proc_handle) {\n        CloseHandle(proc_handle);\n        return TRUE;\n    } else {\n        return FALSE;\n    }\n}\n\nint\nwin32_kill_process (const char *process_name)\n{\n    HANDLE proc_handle = get_process_handle(process_name);\n\n    if (proc_handle) {\n        TerminateProcess(proc_handle, 0);\n        CloseHandle(proc_handle);\n        return 0;\n    } else {\n        return -1;\n    }\n}\n\nint\nwin32_spawn_process (char *cmdline_in, char *working_directory_in)\n{\n    if (!cmdline_in)\n        return -1;\n\n    wchar_t *cmdline_w = NULL;\n    wchar_t *working_directory_w = NULL;\n\n    cmdline_w = wchar_from_utf8 (cmdline_in);\n    if (!cmdline_in) {\n        g_warning (\"failed to convert cmdline_in\");\n        return -1;\n    }\n    \n    if (working_directory_in) {\n        working_directory_w = wchar_from_utf8 (working_directory_in);\n        if (!working_directory_w) {\n            g_warning (\"failed to convert working_directory_in\");\n            return -1;\n        }\n    }\n\n    STARTUPINFOW si;\n    
PROCESS_INFORMATION pi;\n    unsigned flags;\n    BOOL success;\n\n    /* we want to execute seafile without crreating a console window */\n    flags = CREATE_NO_WINDOW;\n\n    memset(&si, 0, sizeof(si));\n    si.cb = sizeof(si);\n    si.dwFlags = STARTF_USESTDHANDLES | STARTF_FORCEOFFFEEDBACK;\n    si.hStdInput = (HANDLE) _get_osfhandle(0);\n    si.hStdOutput = (HANDLE) _get_osfhandle(1);\n    si.hStdError = (HANDLE) _get_osfhandle(2);\n    \n    memset(&pi, 0, sizeof(pi));\n\n    success = CreateProcessW (NULL, cmdline_w, NULL, NULL, TRUE, flags,\n                              NULL, working_directory_w, &si, &pi);\n    free (cmdline_w);\n    if (working_directory_w) free (working_directory_w);\n    \n    if (!success) {\n        g_warning (\"failed to fork_process: GLE=%lu\\n\", GetLastError());\n        return -1;\n    }\n\n    /* close the handle of thread so that the process object can be freed by\n     * system\n     */\n    CloseHandle(pi.hThread);\n    CloseHandle(pi.hProcess);\n    return 0;\n}\n\nchar *\nwchar_to_utf8 (const wchar_t *wch)\n{\n    if (wch == NULL) {\n        return NULL;\n    }\n\n    char *utf8 = NULL;\n    int bufsize, len;\n\n    bufsize = WideCharToMultiByte\n        (CP_UTF8,               /* multibyte code page */\n         0,                     /* flags */\n         wch,                   /* src */\n         -1,                    /* src len, -1 for all includes \\0 */\n         utf8,                  /* dst */\n         0,                     /* dst buf len */\n         NULL,                  /* default char */\n         NULL);                 /* BOOL flag indicates default char is used */\n\n    if (bufsize <= 0) {\n        g_warning (\"failed to convert a string from wchar to utf8 0\");\n        return NULL;\n    }\n\n    utf8 = g_malloc(bufsize);\n    len = WideCharToMultiByte\n        (CP_UTF8,               /* multibyte code page */\n         0,                     /* flags */\n         wch,                   /* src */\n      
   -1,                    /* src len, -1 for all includes \\0 */\n         utf8,                  /* dst */\n         bufsize,               /* dst buf len */\n         NULL,                  /* default char */\n         NULL);                 /* BOOL flag indicates default char is used */\n\n    if (len != bufsize) {\n        g_free (utf8);\n        g_warning (\"failed to convert a string from wchar to utf8\");\n        return NULL;\n    }\n\n    return utf8;\n}\n\nwchar_t *\nwchar_from_utf8 (const char *utf8)\n{\n    if (utf8 == NULL) {\n        return NULL;\n    }\n\n    wchar_t *wch = NULL;\n    int bufsize, len;\n\n    bufsize = MultiByteToWideChar\n        (CP_UTF8,               /* multibyte code page */\n         0,                     /* flags */\n         utf8,                  /* src */\n         -1,                    /* src len, -1 for all includes \\0 */\n         wch,                   /* dst */\n         0);                    /* dst buf len */\n\n    if (bufsize <= 0) {\n        g_warning (\"failed to convert a string from wchar to utf8 0\");\n        return NULL;\n    }\n\n    wch = g_malloc (bufsize * sizeof(wchar_t));\n    len = MultiByteToWideChar\n        (CP_UTF8,               /* multibyte code page */\n         0,                     /* flags */\n         utf8,                  /* src */\n         -1,                    /* src len, -1 for all includes \\0 */\n         wch,                   /* dst */\n         bufsize);              /* dst buf len */\n\n    if (len != bufsize) {\n        g_free (wch);\n        g_warning (\"failed to convert a string from utf8 to wchar\");\n        return NULL;\n    }\n\n    return wch;\n}\n\n#endif  /* ifdef WIN32 */\n\n#ifdef __linux__\n/* read the link of /proc/123/exe and compare with `process_name' */\nstatic int\nfind_process_in_dirent(struct dirent *dir, const char *process_name)\n{\n    char path[512];\n    /* fisrst construct a path like /proc/123/exe */\n    if (sprintf (path, \"/proc/%s/exe\", 
dir->d_name) < 0) {\n        return -1;\n    }\n\n    char buf[SEAF_PATH_MAX];\n    /* get the full path of exe */\n    ssize_t l = readlink(path, buf, SEAF_PATH_MAX);\n\n    if (l < 0)\n        return -1;\n    buf[l] = '\\0';\n\n    /* get the base name of exe */\n    char *base = g_path_get_basename(buf);\n    int ret = strcmp(base, process_name);\n    g_free(base);\n\n    if (ret == 0)\n        return atoi(dir->d_name);\n    else\n        return -1;\n}\n\n/* read the /proc fs to determine whether some process is running */\ngboolean process_is_running (const char *process_name)\n{\n    DIR *proc_dir = opendir(\"/proc\");\n    if (!proc_dir) {\n        fprintf (stderr, \"failed to open /proc/ dir\\n\");\n        return FALSE;\n    }\n\n    struct dirent *subdir = NULL;\n    while ((subdir = readdir(proc_dir))) {\n        char first = subdir->d_name[0];\n        /* /proc/[1-9][0-9]* */\n        if (first > '9' || first < '1')\n            continue;\n        int pid = find_process_in_dirent(subdir, process_name);\n        if (pid > 0) {\n            closedir(proc_dir);\n            return TRUE;\n        }\n    }\n\n    closedir(proc_dir);\n    return FALSE;\n}\n\nint count_process(const char *process_name)\n{\n    int count = 0;\n    DIR *proc_dir = opendir(\"/proc\");\n    if (!proc_dir) {\n        g_warning (\"failed to open /proc/ :%s\\n\", strerror(errno));\n        return FALSE;\n    }\n\n    struct dirent *subdir = NULL;\n    while ((subdir = readdir(proc_dir))) {\n        char first = subdir->d_name[0];\n        /* /proc/[1-9][0-9]* */\n        if (first > '9' || first < '1')\n            continue;\n        if (find_process_in_dirent(subdir, process_name) > 0) {\n            count++;\n        }\n    }\n\n    closedir (proc_dir);\n    return count;\n}\n\n#endif\n\n#ifdef __APPLE__\ngboolean process_is_running (const char *process_name)\n{\n    //TODO\n    return FALSE;\n}\n#endif\n\nchar*\nccnet_object_type_from_id (const char *object_id)\n{\n    char 
*ptr;\n\n    if ( !(ptr = strchr(object_id, '/')) )\n        return NULL;\n\n    return g_strndup(object_id, ptr - object_id);\n}\n\n\n#ifdef WIN32\n/**\n * In Win32 we need to use _stat64 for files larger than 2GB. _stat64 needs\n * the `path' argument in gbk encoding.\n */\n    #define STAT_STRUCT struct __stat64\n    #define STAT_FUNC win_stat64_utf8\n\nstatic inline int\nwin_stat64_utf8 (char *path_utf8, STAT_STRUCT *sb)\n{\n    wchar_t *path_w = wchar_from_utf8 (path_utf8);\n    int result = _wstat64 (path_w, sb);\n    free (path_w);\n    return result;\n}\n\n#else\n    #define STAT_STRUCT struct stat\n    #define STAT_FUNC stat\n#endif\n\nstatic gint64\ncalc_recursively (const char *path, GError **calc_error)\n{\n    gint64 sum = 0;\n\n    GError *error = NULL;\n    GDir *folder = g_dir_open(path, 0, &error);\n    if (!folder) {\n        g_set_error (calc_error, CCNET_DOMAIN, 0,\n                     \"g_open() dir %s failed:%s\\n\", path, error->message);\n        return -1;\n    }\n\n    const char *name = NULL;\n    while ((name = g_dir_read_name(folder)) != NULL) {\n        STAT_STRUCT sb;\n        char *full_path= g_build_filename (path, name, NULL);\n        if (STAT_FUNC(full_path, &sb) < 0) {\n            g_set_error (calc_error, CCNET_DOMAIN, 0, \"failed to stat on %s: %s\\n\",\n                         full_path, strerror(errno));\n            g_free(full_path);\n            g_dir_close(folder);\n            return -1;\n        }\n\n        if (S_ISDIR(sb.st_mode)) {\n            gint64 size = calc_recursively(full_path, calc_error);\n            if (size < 0) {\n                g_free (full_path);\n                g_dir_close (folder);\n                return -1;\n            }\n            sum += size;\n            g_free(full_path);\n        } else if (S_ISREG(sb.st_mode)) {\n            sum += sb.st_size;\n            g_free(full_path);\n        }\n    }\n\n    g_dir_close (folder);\n    return sum;\n}\n\ngint64\nccnet_calc_directory_size (const 
char *path, GError **error)\n{\n    return calc_recursively (path, error);\n}\n\n#ifdef WIN32\n/*\n * strtok_r code directly from glibc.git /string/strtok_r.c since windows\n * doesn't have it.\n */\nchar *\nstrtok_r(char *s, const char *delim, char **save_ptr)\n{\n    char *token;\n    \n    if(s == NULL)\n        s = *save_ptr;\n    \n    /* Scan leading delimiters.  */\n    s += strspn(s, delim);\n    if(*s == '\\0') {\n        *save_ptr = s;\n        return NULL;\n    }\n    \n    /* Find the end of the token.  */\n    token = s;\n    s = strpbrk(token, delim);\n    \n    if(s == NULL) {\n        /* This token finishes the string.  */\n        *save_ptr = strchr(token, '\\0');\n    } else {\n        /* Terminate the token and make *SAVE_PTR point past it.  */\n        *s = '\\0';\n        *save_ptr = s + 1;\n    }\n    \n    return token;\n}\n#endif\n\n/* JSON related utils. For compatibility with json-glib. */\n\nconst char *\njson_object_get_string_member (json_t *object, const char *key)\n{\n    json_t *string = json_object_get (object, key);\n    if (!string)\n        return NULL;\n    return json_string_value (string);\n}\n\ngboolean\njson_object_has_member (json_t *object, const char *key)\n{\n    return (json_object_get (object, key) != NULL);\n}\n\ngint64\njson_object_get_int_member (json_t *object, const char *key)\n{\n    json_t *integer = json_object_get (object, key);\n    return json_integer_value (integer);\n}\n\nvoid\njson_object_set_string_member (json_t *object, const char *key, const char *value)\n{\n    json_object_set_new (object, key, json_string (value));\n}\n\nvoid\njson_object_set_int_member (json_t *object, const char *key, gint64 value)\n{\n    json_object_set_new (object, key, json_integer (value));\n}\n\nvoid\nclean_utf8_data (char *data, int len)\n{\n    const char *s, *e;\n    char *p;\n    gboolean is_valid;\n\n    s = data;\n    p = data;\n\n    while ((s - data) != len) {\n        is_valid = g_utf8_validate (s, len - (s - data), 
&e);\n        if (is_valid)\n            break;\n\n        if (s != e)\n            p += (e - s);\n        *p = '?';\n        ++p;\n        s = e + 1;\n    }\n}\n\nchar *\nnormalize_utf8_path (const char *path)\n{\n    if (!g_utf8_validate (path, -1, NULL))\n        return NULL;\n    return g_utf8_normalize (path, -1, G_NORMALIZE_NFC);\n}\n\n/* zlib related wrapper functions. */\n\n#define ZLIB_BUF_SIZE 16384\n\nint\nseaf_compress (guint8 *input, int inlen, guint8 **output, int *outlen)\n{\n    int ret;\n    unsigned have;\n    z_stream strm;\n    guint8 out[ZLIB_BUF_SIZE];\n    GByteArray *barray;\n\n    if (inlen == 0)\n        return -1;\n\n    /* allocate deflate state */\n    strm.zalloc = Z_NULL;\n    strm.zfree = Z_NULL;\n    strm.opaque = Z_NULL;\n    ret = deflateInit(&strm, Z_DEFAULT_COMPRESSION);\n    if (ret != Z_OK) {\n        g_warning (\"deflateInit failed.\\n\");\n        return -1;\n    }\n\n    strm.avail_in = inlen;\n    strm.next_in = input;\n    barray = g_byte_array_new ();\n\n    do {\n        strm.avail_out = ZLIB_BUF_SIZE;\n        strm.next_out = out;\n        ret = deflate(&strm, Z_FINISH);    /* no bad return value */\n        have = ZLIB_BUF_SIZE - strm.avail_out;\n        g_byte_array_append (barray, out, have);\n    } while (ret != Z_STREAM_END);\n\n    *outlen = barray->len;\n    *output = g_byte_array_free (barray, FALSE);\n\n    /* clean up and return */\n    (void)deflateEnd(&strm);\n    return 0;\n}\n\nint\nseaf_decompress (guint8 *input, int inlen, guint8 **output, int *outlen)\n{\n    int ret;\n    unsigned have;\n    z_stream strm;\n    unsigned char out[ZLIB_BUF_SIZE];\n    GByteArray *barray;\n\n    if (inlen == 0) {\n        g_warning (\"Empty input for zlib, invalid.\\n\");\n        return -1;\n    }\n\n    /* allocate inflate state */\n    strm.zalloc = Z_NULL;\n    strm.zfree = Z_NULL;\n    strm.opaque = Z_NULL;\n    strm.avail_in = 0;\n    strm.next_in = Z_NULL;\n    ret = inflateInit(&strm);\n    if (ret != Z_OK) {\n   
     g_warning (\"inflateInit failed.\\n\");\n        return -1;\n    }\n\n    strm.avail_in = inlen;\n    strm.next_in = input;\n    barray = g_byte_array_new ();\n\n    do {\n        strm.avail_out = ZLIB_BUF_SIZE;\n        strm.next_out = out;\n        ret = inflate(&strm, Z_NO_FLUSH);\n        if (ret < 0) {\n            g_warning (\"Failed to inflate.\\n\");\n            goto out;\n        }\n        have = ZLIB_BUF_SIZE - strm.avail_out;\n        g_byte_array_append (barray, out, have);\n    } while (ret != Z_STREAM_END);\n\nout:\n    /* clean up and return */\n    (void)inflateEnd(&strm);\n\n    if (ret == Z_STREAM_END) {\n        *outlen = barray->len;\n        *output = g_byte_array_free (barray, FALSE);\n        return 0;\n    } else {\n        g_byte_array_free (barray, TRUE);\n        return -1;\n    }\n}\n\nchar*\nformat_dir_path (const char *path)\n{\n    int path_len = strlen (path);\n    char *rpath;\n    if (path[0] != '/') {\n        rpath = g_strconcat (\"/\", path, NULL);\n        path_len++;\n    } else {\n        rpath = g_strdup (path);\n    }\n    while (path_len > 1 && rpath[path_len-1] == '/') {\n        rpath[path_len-1] = '\\0';\n        path_len--;\n    }\n\n    return rpath;\n}\n\ngboolean\nis_empty_string (const char *str)\n{\n    return !str || strcmp (str, \"\") == 0;\n}\n\ngboolean\nis_permission_valid (const char *perm)\n{\n    if (is_empty_string (perm)) {\n        return FALSE;\n    }\n\n    return strcmp (perm, \"r\") == 0 || strcmp (perm, \"rw\") == 0;\n}\n\nchar *\nseaf_key_file_get_string (GKeyFile *key_file,\n                          const char *group,\n                          const char *key,\n                          GError **error)\n{\n    char *v;\n\n    v = g_key_file_get_string (key_file, group, key, error);\n    if (!v || v[0] == '\\0') {\n        g_free (v);\n        return NULL;\n    }\n\n    return g_strchomp(v);\n}\n\ngchar*\nccnet_key_file_get_string (GKeyFile *keyf,\n                           const char 
*category,\n                           const char *key)\n{\n    gchar *v;\n\n    if (!g_key_file_has_key (keyf, category, key, NULL))\n        return NULL;\n\n    v = g_key_file_get_string (keyf, category, key, NULL);\n    if (v != NULL && v[0] == '\\0') {\n        g_free(v);\n        return NULL;\n    }\n\n    return g_strchomp(v);\n}\n"
  },
  {
    "path": "lib/utils.h",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#ifndef CCNET_UTILS_H\n#define CCNET_UTILS_H\n\n#ifdef WIN32\n#ifndef _WIN32_WINNT\n#define _WIN32_WINNT 0x500\n#endif\n#include <windows.h>\n#endif\n\n#include <sys/time.h>\n#include <time.h>\n#include <stdint.h>\n#include <unistd.h>\n#include <stdarg.h>\n#include <glib.h>\n#include <glib-object.h>\n#include <stdlib.h>\n#include <sys/stat.h>\n\n#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)\n#include <event2/util.h>\n#else\n#include <evutil.h>\n#endif\n\n#ifdef __linux__\n#include <endian.h>\n#endif\n\n#ifdef __OpenBSD__\n#include <machine/endian.h>\n#endif\n\n#ifdef WIN32\n#include <errno.h>\n#include <glib/gstdio.h>\n\n#ifndef WEXITSTATUS\n#define WEXITSTATUS(status) (((status) & 0xff00) >> 8)\n#endif\n\n/* Borrowed from libevent */\n#define ccnet_pipe_t intptr_t\n\nint pgpipe (ccnet_pipe_t handles[2]);\n/* Should only be called in main loop. */\n#define ccnet_pipe(a) pgpipe((a))\n#define piperead(a,b,c) recv((a),(b),(c),0)\n#define pipewrite(a,b,c) send((a),(b),(c),0)\n#define pipeclose(a) closesocket((a))\n\n#define SeafStat struct __stat64\n\n#else\n\n#define ccnet_pipe_t int\n\n#define ccnet_pipe(a) pipe((a))\n#define piperead(a,b,c) read((a),(b),(c))\n#define pipewrite(a,b,c) write((a),(b),(c))\n#define pipeclose(a) close((a))\n\n#define SeafStat struct stat\n\n#endif\n\n#define pipereadn(a,b,c) recvn((a),(b),(c))\n#define pipewriten(a,b,c) sendn((a),(b),(c))\n\nint seaf_stat (const char *path, SeafStat *st);\nint seaf_fstat (int fd, SeafStat *st);\n\n#ifdef WIN32\nvoid\nseaf_stat_from_find_data (WIN32_FIND_DATAW *fdata, SeafStat *st);\n#endif\n\nint seaf_set_file_time (const char *path, guint64 mtime);\n\n#ifdef WIN32\nwchar_t *\nwin32_long_path (const char *path);\n\n/* Convert a (possible) 8.3 format path to long path */\nwchar_t *\nwin32_83_path_to_long_path (const char *worktree, const wchar_t *path, int 
path_len);\n\n__time64_t\nfile_time_to_unix_time (FILETIME *ftime);\n#endif\n\nint\nseaf_util_unlink (const char *path);\n\nint\nseaf_util_rmdir (const char *path);\n\nint\nseaf_util_mkdir (const char *path, mode_t mode);\n\nint\nseaf_util_open (const char *path, int flags);\n\nint\nseaf_util_create (const char *path, int flags, mode_t mode);\n\nint\nseaf_util_rename (const char *oldpath, const char *newpath);\n\ngboolean\nseaf_util_exists (const char *path);\n\ngint64\nseaf_util_lseek (int fd, gint64 offset, int whence);\n\n#ifdef WIN32\n\ntypedef int (*DirentCallback) (wchar_t *parent,\n                               WIN32_FIND_DATAW *fdata,\n                               void *user_data,\n                               gboolean *stop);\n\nint\ntraverse_directory_win32 (wchar_t *path_w,\n                          DirentCallback callback,\n                          void *user_data);\n#endif\n\n#ifndef O_BINARY\n#define O_BINARY 0\n#endif\n\n/* for debug */\n#ifndef ccnet_warning\n#define ccnet_warning(fmt, ...) g_warning(\"%s(%d): \" fmt, __FILE__, __LINE__, ##__VA_ARGS__)\n#endif\n\n#ifndef ccnet_error\n#define ccnet_error(fmt, ...)   g_error(\"%s(%d): \" fmt, __FILE__, __LINE__, ##__VA_ARGS__)\n#endif\n\n#ifndef ccnet_message\n#define ccnet_message(fmt, ...) 
g_message(\"%s(%d): \" fmt, __FILE__, __LINE__, ##__VA_ARGS__)\n#endif\n\n#define CCNET_DOMAIN g_quark_from_string(\"ccnet\")\n\n#define CCNET_ERR_INTERNAL 500\n\n\nstruct timeval timeval_from_msec (uint64_t milliseconds);\n\n\nsize_t ccnet_strlcpy (char *dst, const char *src, size_t size);\n\nvoid rawdata_to_hex (const unsigned char *rawdata, char *hex_str, int n_bytes);\nint hex_to_rawdata (const char *hex_str, unsigned char *rawdata, int n_bytes);\n\n#define sha1_to_hex(sha1, hex) rawdata_to_hex((sha1), (hex), 20)\n#define hex_to_sha1(hex, sha1) hex_to_rawdata((hex), (sha1), 20)\n\n/* If msg is NULL-terminated, set len to -1 */\nint calculate_sha1 (unsigned char *sha1, const char *msg, int len);\nint ccnet_sha1_equal (const void *v1, const void *v2);\nunsigned int ccnet_sha1_hash (const void *v);\n\nchar* gen_uuid ();\nvoid gen_uuid_inplace (char *buf);\ngboolean is_uuid_valid (const char *uuid_str);\n\ngboolean\nis_object_id_valid (const char *obj_id);\n\n/* dir operations */\nint checkdir (const char *dir);\nint checkdir_with_mkdir (const char *path);\nchar* ccnet_expand_path (const char *src);\n\n/**\n * Make directory with 256 sub-directories from '00' to 'ff'.\n * `base` and subdir will be created if they are not existing. \n */\nint objstore_mkdir (const char *base);\nvoid objstore_get_path (char *path, const char *base, const char *obj_id);\n\n/* Read \"n\" bytes from a descriptor. */\nssize_t\treadn(int fd, void *vptr, size_t n);\nssize_t writen(int fd, const void *vptr, size_t n);\n\n/* Read \"n\" bytes from a socket. 
*/\nssize_t\trecvn(evutil_socket_t fd, void *vptr, size_t n);\nssize_t sendn(evutil_socket_t fd, const void *vptr, size_t n);\n\nint copy_fd (int ifd, int ofd);\nint copy_file (const char *dst, const char *src, int mode);\n\n\n/* string utilities */\n\nchar** strsplit_by_char (char *string, int *length, char c);\n\nchar * strjoin_n (const char *seperator, int argc, char **argv);\n\nint is_ipaddr_valid (const char *ip);\n\ntypedef void (*KeyValueFunc) (void *data, const char *key, char *value);\nvoid parse_key_value_pairs (char *string, KeyValueFunc func, void *data);\n\ntypedef gboolean (*KeyValueFunc2) (void *data, const char *key,\n                                   const char *value);\nvoid parse_key_value_pairs2 (char *string, KeyValueFunc2 func, void *data);\n\nGList *string_list_append (GList *str_list, const char *string);\nGList *string_list_append_sorted (GList *str_list, const char *string);\nGList *string_list_remove (GList *str_list, const char *string);\nvoid string_list_free (GList *str_list);\ngboolean string_list_is_exists (GList *str_list, const char *string);\nvoid string_list_join (GList *str_list, GString *strbuf, const char *seperator);\nGList *string_list_parse (const char *list_in_str, const char *seperator);\nGList *string_list_parse_sorted (const char *list_in_str, const char *seperator);\ngboolean string_list_sorted_is_equal (GList *list1, GList *list2);\n\nchar** ncopy_string_array (char **orig, int n);\nvoid nfree_string_array (char **array, int n);\n\n/* 64bit time */\ngint64 get_current_time();\n\nint\nccnet_encrypt (char **data_out,\n               int *out_len,\n               const char *data_in,\n               const int in_len,\n               const char *code,\n               const int code_len);\n\n\nint\nccnet_decrypt (char **data_out,\n               int *out_len,\n               const char *data_in,\n               const int in_len,\n               const char *code,\n               const int code_len);\n\n\n/*\n * Utility 
functions for converting data to/from network byte order.\n */\n\nstatic inline uint64_t\nbswap64 (uint64_t val)\n{\n    uint64_t ret;\n    uint8_t *ptr = (uint8_t *)&ret;\n\n    ptr[0]=((val)>>56)&0xFF;\n    ptr[1]=((val)>>48)&0xFF;\n    ptr[2]=((val)>>40)&0xFF;\n    ptr[3]=((val)>>32)&0xFF;\n    ptr[4]=((val)>>24)&0xFF;\n    ptr[5]=((val)>>16)&0xFF;\n    ptr[6]=((val)>>8)&0xFF;\n    ptr[7]=(val)&0xFF;\n\n    return ret;\n}\n\nstatic inline uint64_t\nhton64(uint64_t val)\n{\n#if __BYTE_ORDER == __LITTLE_ENDIAN || defined WIN32 || defined __APPLE__\n    return bswap64 (val);\n#else\n    return val;\n#endif\n}\n\nstatic inline uint64_t \nntoh64(uint64_t val) \n{\n#if __BYTE_ORDER == __LITTLE_ENDIAN || defined WIN32 || defined __APPLE__\n    return bswap64 (val);\n#else\n    return val;\n#endif\n}\n\nstatic inline void put64bit(uint8_t **ptr,uint64_t val)\n{\n    uint64_t val_n = hton64 (val);\n    *((uint64_t *)(*ptr)) = val_n;\n    (*ptr)+=8;\n}\n\nstatic inline void put32bit(uint8_t **ptr,uint32_t val)\n{\n    uint32_t val_n = htonl (val);\n    *((uint32_t *)(*ptr)) = val_n;\n    (*ptr)+=4;\n}\n\nstatic inline void put16bit(uint8_t **ptr,uint16_t val)\n{\n    uint16_t val_n = htons (val);\n    *((uint16_t *)(*ptr)) = val_n;\n    (*ptr)+=2;\n}\n\nstatic inline uint64_t get64bit(const uint8_t **ptr)\n{\n    uint64_t val_h = ntoh64 (*((uint64_t *)(*ptr)));\n    (*ptr)+=8;\n    return val_h;\n}\n\nstatic inline uint32_t get32bit(const uint8_t **ptr)\n{\n    uint32_t val_h = ntohl (*((uint32_t *)(*ptr)));\n    (*ptr)+=4;\n    return val_h;\n}\n\nstatic inline uint16_t get16bit(const uint8_t **ptr)\n{\n    uint16_t val_h = ntohs (*((uint16_t *)(*ptr)));\n    (*ptr)+=2;\n    return val_h;\n}\n\n/* Convert between local encoding and utf8. 
Returns the converted\n * string if success, otherwise return NULL\n */\nchar *ccnet_locale_from_utf8 (const gchar *src);\nchar *ccnet_locale_to_utf8 (const gchar *src);\n\n/* Detect whether a process with the given name is running right now. */\ngboolean process_is_running(const char *name);\n\n/* count how much instance of a program is running  */\nint count_process (const char *process_name_in);\n\n#ifdef WIN32\nint win32_kill_process (const char *process_name_in);\nint win32_spawn_process (char *cmd, char *wd);\nchar *wchar_to_utf8 (const wchar_t *src);\nwchar_t *wchar_from_utf8 (const char *src);\n#endif\n\nchar* ccnet_object_type_from_id (const char *object_id);\n\ngint64 ccnet_calc_directory_size (const char *path, GError **error);\n\n#ifdef WIN32\nchar * strtok_r(char *s, const char *delim, char **save_ptr);\n#endif\n\n#include <jansson.h>\n\nconst char *\njson_object_get_string_member (json_t *object, const char *key);\n\ngboolean\njson_object_has_member (json_t *object, const char *key);\n\ngint64\njson_object_get_int_member (json_t *object, const char *key);\n\nvoid\njson_object_set_string_member (json_t *object, const char *key, const char *value);\n\nvoid\njson_object_set_int_member (json_t *object, const char *key, gint64 value);\n\n/* Replace invalid UTF-8 bytes with '?' */\nvoid\nclean_utf8_data (char *data, int len);\n\nchar *\nnormalize_utf8_path (const char *path);\n\n/* zlib related functions. 
*/\n\nint\nseaf_compress (guint8 *input, int inlen, guint8 **output, int *outlen);\n\nint\nseaf_decompress (guint8 *input, int inlen, guint8 **output, int *outlen);\n\nchar*\nformat_dir_path (const char *path);\n\ngboolean\nis_empty_string (const char *str);\n\ngboolean\nis_permission_valid (const char *perm);\n\nchar *\nseaf_key_file_get_string (GKeyFile *key_file,\n                          const char *group,\n                          const char *key,\n                          GError **error);\n\ngchar* ccnet_key_file_get_string (GKeyFile *keyf,\n                                  const char *category,\n                                  const char *key);\n\n#endif\n"
  },
  {
    "path": "lib/webaccess.vala",
    "content": "namespace Seafile {\n\npublic class WebAccess : Object {\n       public string repo_id { set; get; }\n       public string obj_id { set; get; }\n       public string op { set; get; }\n       public string username { set; get; }\n}\n\n}\n"
  },
  {
    "path": "m4/ax_lib_sqlite3.m4",
    "content": "# ===========================================================================\n#         http://www.nongnu.org/autoconf-archive/ax_lib_sqlite3.html\n# ===========================================================================\n#\n# SYNOPSIS\n#\n#   AX_LIB_SQLITE3([MINIMUM-VERSION])\n#\n# DESCRIPTION\n#\n#   Test for the SQLite 3 library of a particular version (or newer)\n#\n#   This macro takes only one optional argument, required version of SQLite\n#   3 library. If required version is not passed, 3.0.0 is used in the test\n#   of existance of SQLite 3.\n#\n#   If no intallation prefix to the installed SQLite library is given the\n#   macro searches under /usr, /usr/local, and /opt.\n#\n#   This macro calls:\n#\n#     AC_SUBST(SQLITE3_CFLAGS)\n#     AC_SUBST(SQLITE3_LDFLAGS)\n#     AC_SUBST(SQLITE3_VERSION)\n#\n#   And sets:\n#\n#     HAVE_SQLITE3\n#\n# LICENSE\n#\n#   Copyright (c) 2008 Mateusz Loskot <mateusz@loskot.net>\n#\n#   Copying and distribution of this file, with or without modification, are\n#   permitted in any medium without royalty provided the copyright notice\n#   and this notice are preserved.\n\nAC_DEFUN([AX_LIB_SQLITE3],\n[\n    AC_ARG_WITH([sqlite3],\n        AC_HELP_STRING(\n            [--with-sqlite3=@<:@ARG@:>@],\n            [use SQLite 3 library @<:@default=yes@:>@, optionally specify the prefix for sqlite3 library]\n        ),\n        [\n        if test \"$withval\" = \"no\"; then\n            WANT_SQLITE3=\"no\"\n        elif test \"$withval\" = \"yes\"; then\n            WANT_SQLITE3=\"yes\"\n            ac_sqlite3_path=\"\"\n        else\n            WANT_SQLITE3=\"yes\"\n            ac_sqlite3_path=\"$withval\"\n        fi\n        ],\n        [WANT_SQLITE3=\"yes\"]\n    )\n\n    SQLITE3_CFLAGS=\"\"\n    SQLITE3_LDFLAGS=\"\"\n    SQLITE3_VERSION=\"\"\n\n    if test \"x$WANT_SQLITE3\" = \"xyes\"; then\n\n        ac_sqlite3_header=\"sqlite3.h\"\n\n        sqlite3_version_req=ifelse([$1], [], [3.0.0], [$1])\n        
sqlite3_version_req_shorten=`expr $sqlite3_version_req : '\\([[0-9]]*\\.[[0-9]]*\\)'`\n        sqlite3_version_req_major=`expr $sqlite3_version_req : '\\([[0-9]]*\\)'`\n        sqlite3_version_req_minor=`expr $sqlite3_version_req : '[[0-9]]*\\.\\([[0-9]]*\\)'`\n        sqlite3_version_req_micro=`expr $sqlite3_version_req : '[[0-9]]*\\.[[0-9]]*\\.\\([[0-9]]*\\)'`\n        if test \"x$sqlite3_version_req_micro\" = \"x\" ; then\n            sqlite3_version_req_micro=\"0\"\n        fi\n\n        sqlite3_version_req_number=`expr $sqlite3_version_req_major \\* 1000000 \\\n                                   \\+ $sqlite3_version_req_minor \\* 1000 \\\n                                   \\+ $sqlite3_version_req_micro`\n\n        AC_MSG_CHECKING([for SQLite3 library >= $sqlite3_version_req])\n\n        if test \"$ac_sqlite3_path\" != \"\"; then\n            ac_sqlite3_ldflags=\"-L$ac_sqlite3_path/lib\"\n            ac_sqlite3_cppflags=\"-I$ac_sqlite3_path/include\"\n        else\n            for ac_sqlite3_path_tmp in /usr /usr/local /opt ; do\n                if test -f \"$ac_sqlite3_path_tmp/include/$ac_sqlite3_header\" \\\n                    && test -r \"$ac_sqlite3_path_tmp/include/$ac_sqlite3_header\"; then\n                    ac_sqlite3_path=$ac_sqlite3_path_tmp\n                    ac_sqlite3_cppflags=\"-I$ac_sqlite3_path_tmp/include\"\n                    ac_sqlite3_ldflags=\"-L$ac_sqlite3_path_tmp/lib\"\n                    break;\n                fi\n            done\n        fi\n\n        ac_sqlite3_ldflags=\"$ac_sqlite3_ldflags -lsqlite3\"\n\n        saved_CPPFLAGS=\"$CPPFLAGS\"\n        CPPFLAGS=\"$CPPFLAGS $ac_sqlite3_cppflags\"\n\n\n        AC_COMPILE_IFELSE(\n            [\n            AC_LANG_PROGRAM([[@%:@include <sqlite3.h>]],\n                [[\n#if (SQLITE_VERSION_NUMBER >= $sqlite3_version_req_number)\n// Everything is okay\n#else\n#  error SQLite version is too old\n#endif\n                ]]\n            )\n            ],\n            [\n           
 AC_MSG_RESULT([yes])\n            success=\"yes\"\n            ],\n            [\n            AC_MSG_RESULT([not found])\n            success=\"no\"\n            ]\n        )\n\n\n        CPPFLAGS=\"$saved_CPPFLAGS\"\n\n        if test \"$success\" = \"yes\"; then\n\n            SQLITE3_CFLAGS=\"$ac_sqlite3_cppflags\"\n            SQLITE3_LDFLAGS=\"$ac_sqlite3_ldflags\"\n\n            ac_sqlite3_header_path=\"$ac_sqlite3_path/include/$ac_sqlite3_header\"\n\n            dnl Retrieve SQLite release version\n            if test \"x$ac_sqlite3_header_path\" != \"x\"; then\n                ac_sqlite3_version=`cat $ac_sqlite3_header_path \\\n                    | grep '#define.*SQLITE_VERSION.*\\\"' | sed -e 's/.* \"//' \\\n                        | sed -e 's/\"//'`\n                if test $ac_sqlite3_version != \"\"; then\n                    SQLITE3_VERSION=$ac_sqlite3_version\n                else\n                    AC_MSG_WARN([Can not find SQLITE_VERSION macro in sqlite3.h header to retrieve SQLite version!])\n                fi\n            fi\n\n            AC_SUBST(SQLITE3_CFLAGS)\n            AC_SUBST(SQLITE3_LDFLAGS)\n            AC_SUBST(SQLITE3_VERSION)\n            AC_DEFINE([HAVE_SQLITE3], [], [Have the SQLITE3 library])\n        fi\n    fi\n])\n"
  },
  {
    "path": "m4/glib-gettext.m4",
    "content": "# Copyright (C) 1995-2002 Free Software Foundation, Inc.\n# Copyright (C) 2001-2003,2004 Red Hat, Inc.\n#\n# This file is free software, distributed under the terms of the GNU\n# General Public License.  As a special exception to the GNU General\n# Public License, this file may be distributed as part of a program\n# that contains a configuration script generated by Autoconf, under\n# the same distribution terms as the rest of that program.\n#\n# This file can be copied and used freely without restrictions.  It can\n# be used in projects which are not available under the GNU Public License\n# but which still want to provide support for the GNU gettext functionality.\n#\n# Macro to add for using GNU gettext.\n# Ulrich Drepper <drepper@cygnus.com>, 1995, 1996\n#\n# Modified to never use included libintl. \n# Owen Taylor <otaylor@redhat.com>, 12/15/1998\n#\n# Major rework to remove unused code\n# Owen Taylor <otaylor@redhat.com>, 12/11/2002\n#\n# Added better handling of ALL_LINGUAS from GNU gettext version \n# written by Bruno Haible, Owen Taylor <otaylor.redhat.com> 5/30/3002\n#\n# Modified to require ngettext\n# Matthias Clasen <mclasen@redhat.com> 08/06/2004\n#\n# We need this here as well, since someone might use autoconf-2.5x\n# to configure GLib then an older version to configure a package\n# using AM_GLIB_GNU_GETTEXT\nAC_PREREQ(2.53)\n\ndnl\ndnl We go to great lengths to make sure that aclocal won't \ndnl try to pull in the installed version of these macros\ndnl when running aclocal in the glib directory.\ndnl\nm4_copy([AC_DEFUN],[glib_DEFUN])\nm4_copy([AC_REQUIRE],[glib_REQUIRE])\ndnl\ndnl At the end, if we're not within glib, we'll define the public\ndnl definitions in terms of our private definitions.\ndnl\n\n# GLIB_LC_MESSAGES\n#--------------------\nglib_DEFUN([GLIB_LC_MESSAGES],\n  [AC_CHECK_HEADERS([locale.h])\n    if test $ac_cv_header_locale_h = yes; then\n    AC_CACHE_CHECK([for LC_MESSAGES], am_cv_val_LC_MESSAGES,\n      
[AC_TRY_LINK([#include <locale.h>], [return LC_MESSAGES],\n       am_cv_val_LC_MESSAGES=yes, am_cv_val_LC_MESSAGES=no)])\n    if test $am_cv_val_LC_MESSAGES = yes; then\n      AC_DEFINE(HAVE_LC_MESSAGES, 1,\n        [Define if your <locale.h> file defines LC_MESSAGES.])\n    fi\n  fi])\n\n# GLIB_PATH_PROG_WITH_TEST\n#----------------------------\ndnl GLIB_PATH_PROG_WITH_TEST(VARIABLE, PROG-TO-CHECK-FOR,\ndnl   TEST-PERFORMED-ON-FOUND_PROGRAM [, VALUE-IF-NOT-FOUND [, PATH]])\nglib_DEFUN([GLIB_PATH_PROG_WITH_TEST],\n[# Extract the first word of \"$2\", so it can be a program name with args.\nset dummy $2; ac_word=[$]2\nAC_MSG_CHECKING([for $ac_word])\nAC_CACHE_VAL(ac_cv_path_$1,\n[case \"[$]$1\" in\n  /*)\n  ac_cv_path_$1=\"[$]$1\" # Let the user override the test with a path.\n  ;;\n  *)\n  IFS=\"${IFS= \t}\"; ac_save_ifs=\"$IFS\"; IFS=\"${IFS}:\"\n  for ac_dir in ifelse([$5], , $PATH, [$5]); do\n    test -z \"$ac_dir\" && ac_dir=.\n    if test -f $ac_dir/$ac_word; then\n      if [$3]; then\n\tac_cv_path_$1=\"$ac_dir/$ac_word\"\n\tbreak\n      fi\n    fi\n  done\n  IFS=\"$ac_save_ifs\"\ndnl If no 4th arg is given, leave the cache variable unset,\ndnl so AC_PATH_PROGS will keep looking.\nifelse([$4], , , [  test -z \"[$]ac_cv_path_$1\" && ac_cv_path_$1=\"$4\"\n])dnl\n  ;;\nesac])dnl\n$1=\"$ac_cv_path_$1\"\nif test ifelse([$4], , [-n \"[$]$1\"], [\"[$]$1\" != \"$4\"]); then\n  AC_MSG_RESULT([$]$1)\nelse\n  AC_MSG_RESULT(no)\nfi\nAC_SUBST($1)dnl\n])\n\n# GLIB_WITH_NLS\n#-----------------\nglib_DEFUN([GLIB_WITH_NLS],\n  dnl NLS is obligatory\n  [USE_NLS=yes\n    AC_SUBST(USE_NLS)\n\n    gt_cv_have_gettext=no\n\n    CATOBJEXT=NONE\n    XGETTEXT=:\n    INTLLIBS=\n\n    AC_CHECK_HEADER(libintl.h,\n     [gt_cv_func_dgettext_libintl=\"no\"\n      libintl_extra_libs=\"\"\n\n      #\n      # First check in libc\n      #\n      AC_CACHE_CHECK([for ngettext in libc], gt_cv_func_ngettext_libc,\n        [AC_TRY_LINK([\n#include <libintl.h>\n],\n         [return !ngettext 
(\"\",\"\", 1)],\n\t  gt_cv_func_ngettext_libc=yes,\n          gt_cv_func_ngettext_libc=no)\n        ])\n  \n      if test \"$gt_cv_func_ngettext_libc\" = \"yes\" ; then\n\t      AC_CACHE_CHECK([for dgettext in libc], gt_cv_func_dgettext_libc,\n        \t[AC_TRY_LINK([\n#include <libintl.h>\n],\n\t          [return !dgettext (\"\",\"\")],\n\t\t  gt_cv_func_dgettext_libc=yes,\n\t          gt_cv_func_dgettext_libc=no)\n        \t])\n      fi\n  \n      if test \"$gt_cv_func_ngettext_libc\" = \"yes\" ; then\n        AC_CHECK_FUNCS(bind_textdomain_codeset)\n      fi\n\n      #\n      # If we don't have everything we want, check in libintl\n      #\n      if test \"$gt_cv_func_dgettext_libc\" != \"yes\" \\\n\t || test \"$gt_cv_func_ngettext_libc\" != \"yes\" \\\n         || test \"$ac_cv_func_bind_textdomain_codeset\" != \"yes\" ; then\n        \n        AC_CHECK_LIB(intl, bindtextdomain,\n\t    [AC_CHECK_LIB(intl, ngettext,\n\t\t    [AC_CHECK_LIB(intl, dgettext,\n\t\t\t          gt_cv_func_dgettext_libintl=yes)])])\n\n\tif test \"$gt_cv_func_dgettext_libintl\" != \"yes\" ; then\n\t  AC_MSG_CHECKING([if -liconv is needed to use gettext])\n\t  AC_MSG_RESULT([])\n  \t  AC_CHECK_LIB(intl, ngettext,\n          \t[AC_CHECK_LIB(intl, dcgettext,\n\t\t       [gt_cv_func_dgettext_libintl=yes\n\t\t\tlibintl_extra_libs=-liconv],\n\t\t\t:,-liconv)],\n\t\t:,-liconv)\n        fi\n\n        #\n        # If we found libintl, then check in it for bind_textdomain_codeset();\n        # we'll prefer libc if neither have bind_textdomain_codeset(),\n        # and both have dgettext and ngettext\n        #\n        if test \"$gt_cv_func_dgettext_libintl\" = \"yes\" ; then\n          glib_save_LIBS=\"$LIBS\"\n          LIBS=\"$LIBS -lintl $libintl_extra_libs\"\n          unset ac_cv_func_bind_textdomain_codeset\n          AC_CHECK_FUNCS(bind_textdomain_codeset)\n          LIBS=\"$glib_save_LIBS\"\n\n          if test \"$ac_cv_func_bind_textdomain_codeset\" = \"yes\" ; then\n            
gt_cv_func_dgettext_libc=no\n          else\n            if test \"$gt_cv_func_dgettext_libc\" = \"yes\" \\\n\t\t&& test \"$gt_cv_func_ngettext_libc\" = \"yes\"; then\n              gt_cv_func_dgettext_libintl=no\n            fi\n          fi\n        fi\n      fi\n\n      if test \"$gt_cv_func_dgettext_libc\" = \"yes\" \\\n\t|| test \"$gt_cv_func_dgettext_libintl\" = \"yes\"; then\n        gt_cv_have_gettext=yes\n      fi\n  \n      if test \"$gt_cv_func_dgettext_libintl\" = \"yes\"; then\n        INTLLIBS=\"-lintl $libintl_extra_libs\"\n      fi\n  \n      if test \"$gt_cv_have_gettext\" = \"yes\"; then\n\tAC_DEFINE(HAVE_GETTEXT,1,\n\t  [Define if the GNU gettext() function is already present or preinstalled.])\n\tGLIB_PATH_PROG_WITH_TEST(MSGFMT, msgfmt,\n\t  [test -z \"`$ac_dir/$ac_word -h 2>&1 | grep 'dv '`\"], no)dnl\n\tif test \"$MSGFMT\" != \"no\"; then\n          glib_save_LIBS=\"$LIBS\"\n          LIBS=\"$LIBS $INTLLIBS\"\n\t  AC_CHECK_FUNCS(dcgettext)\n\t  AC_PATH_PROG(GMSGFMT, gmsgfmt, $MSGFMT)\n\t  GLIB_PATH_PROG_WITH_TEST(XGETTEXT, xgettext,\n\t    [test -z \"`$ac_dir/$ac_word -h 2>&1 | grep '(HELP)'`\"], :)\n\t  AC_TRY_LINK(, [extern int _nl_msg_cat_cntr;\n\t\t\t return _nl_msg_cat_cntr],\n\t    [CATOBJEXT=.gmo \n             DATADIRNAME=share],\n\t    [case $host in\n\t    *-*-solaris*)\n\t    dnl On Solaris, if bind_textdomain_codeset is in libc,\n\t    dnl GNU format message catalog is always supported,\n            dnl since both are added to the libc all together.\n\t    dnl Hence, we'd like to go with DATADIRNAME=share and\n\t    dnl and CATOBJEXT=.gmo in this case.\n            AC_CHECK_FUNC(bind_textdomain_codeset,\n\t      [CATOBJEXT=.gmo \n               DATADIRNAME=share],\n\t      [CATOBJEXT=.mo\n               DATADIRNAME=lib])\n\t    ;;\n\t    *)\n\t    CATOBJEXT=.mo\n            DATADIRNAME=lib\n\t    ;;\n\t    esac])\n          LIBS=\"$glib_save_LIBS\"\n\t  INSTOBJEXT=.mo\n\telse\n\t  gt_cv_have_gettext=no\n\tfi\n      fi\n    ])\n\n   
 if test \"$gt_cv_have_gettext\" = \"yes\" ; then\n      AC_DEFINE(ENABLE_NLS, 1,\n        [always defined to indicate that i18n is enabled])\n    fi\n\n    dnl Test whether we really found GNU xgettext.\n    if test \"$XGETTEXT\" != \":\"; then\n      dnl If it is not GNU xgettext we define it as : so that the\n      dnl Makefiles still can work.\n      if $XGETTEXT --omit-header /dev/null 2> /dev/null; then\n        : ;\n      else\n        AC_MSG_RESULT(\n\t  [found xgettext program is not GNU xgettext; ignore it])\n        XGETTEXT=\":\"\n      fi\n    fi\n\n    # We need to process the po/ directory.\n    POSUB=po\n\n    AC_OUTPUT_COMMANDS(\n      [case \"$CONFIG_FILES\" in *po/Makefile.in*)\n        sed -e \"/POTFILES =/r po/POTFILES\" po/Makefile.in > po/Makefile\n      esac])\n\n    dnl These rules are solely for the distribution goal.  While doing this\n    dnl we only have to keep exactly one list of the available catalogs\n    dnl in configure.in.\n    for lang in $ALL_LINGUAS; do\n      GMOFILES=\"$GMOFILES $lang.gmo\"\n      POFILES=\"$POFILES $lang.po\"\n    done\n\n    dnl Make all variables we use known to autoconf.\n    AC_SUBST(CATALOGS)\n    AC_SUBST(CATOBJEXT)\n    AC_SUBST(DATADIRNAME)\n    AC_SUBST(GMOFILES)\n    AC_SUBST(INSTOBJEXT)\n    AC_SUBST(INTLLIBS)\n    AC_SUBST(PO_IN_DATADIR_TRUE)\n    AC_SUBST(PO_IN_DATADIR_FALSE)\n    AC_SUBST(POFILES)\n    AC_SUBST(POSUB)\n  ])\n\n# AM_GLIB_GNU_GETTEXT\n# -------------------\n# Do checks necessary for use of gettext. If a suitable implementation \n# of gettext is found in either in libintl or in the C library,\n# it will set INTLLIBS to the libraries needed for use of gettext\n# and AC_DEFINE() HAVE_GETTEXT and ENABLE_NLS. (The shell variable\n# gt_cv_have_gettext will be set to \"yes\".) 
It will also call AC_SUBST()\n# on various variables needed by the Makefile.in.in installed by \n# glib-gettextize.\ndnl\nglib_DEFUN([GLIB_GNU_GETTEXT],\n  [AC_REQUIRE([AC_PROG_CC])dnl\n   AC_REQUIRE([AC_HEADER_STDC])dnl\n   \n   GLIB_LC_MESSAGES\n   GLIB_WITH_NLS\n\n   if test \"$gt_cv_have_gettext\" = \"yes\"; then\n     if test \"x$ALL_LINGUAS\" = \"x\"; then\n       LINGUAS=\n     else\n       AC_MSG_CHECKING(for catalogs to be installed)\n       NEW_LINGUAS=\n       for presentlang in $ALL_LINGUAS; do\n         useit=no\n         if test \"%UNSET%\" != \"${LINGUAS-%UNSET%}\"; then\n           desiredlanguages=\"$LINGUAS\"\n         else\n           desiredlanguages=\"$ALL_LINGUAS\"\n         fi\n         for desiredlang in $desiredlanguages; do\n \t   # Use the presentlang catalog if desiredlang is\n           #   a. equal to presentlang, or\n           #   b. a variant of presentlang (because in this case,\n           #      presentlang can be used as a fallback for messages\n           #      which are not translated in the desiredlang catalog).\n           case \"$desiredlang\" in\n             \"$presentlang\"*) useit=yes;;\n           esac\n         done\n         if test $useit = yes; then\n           NEW_LINGUAS=\"$NEW_LINGUAS $presentlang\"\n         fi\n       done\n       LINGUAS=$NEW_LINGUAS\n       AC_MSG_RESULT($LINGUAS)\n     fi\n\n     dnl Construct list of names of catalog files to be constructed.\n     if test -n \"$LINGUAS\"; then\n       for lang in $LINGUAS; do CATALOGS=\"$CATALOGS $lang$CATOBJEXT\"; done\n     fi\n   fi\n\n   dnl If the AC_CONFIG_AUX_DIR macro for autoconf is used we possibly\n   dnl find the mkinstalldirs script in another subdir but ($top_srcdir).\n   dnl Try to locate is.\n   MKINSTALLDIRS=\n   if test -n \"$ac_aux_dir\"; then\n     MKINSTALLDIRS=\"$ac_aux_dir/mkinstalldirs\"\n   fi\n   if test -z \"$MKINSTALLDIRS\"; then\n     MKINSTALLDIRS=\"\\$(top_srcdir)/mkinstalldirs\"\n   fi\n   AC_SUBST(MKINSTALLDIRS)\n\n   dnl 
Generate list of files to be processed by xgettext which will\n   dnl be included in po/Makefile.\n   test -d po || mkdir po\n   if test \"x$srcdir\" != \"x.\"; then\n     if test \"x`echo $srcdir | sed 's@/.*@@'`\" = \"x\"; then\n       posrcprefix=\"$srcdir/\"\n     else\n       posrcprefix=\"../$srcdir/\"\n     fi\n   else\n     posrcprefix=\"../\"\n   fi\n   rm -f po/POTFILES\n   sed -e \"/^#/d\" -e \"/^\\$/d\" -e \"s,.*,\t$posrcprefix& \\\\\\\\,\" -e \"\\$s/\\(.*\\) \\\\\\\\/\\1/\" \\\n\t< $srcdir/po/POTFILES.in > po/POTFILES\n  ])\n\n# AM_GLIB_DEFINE_LOCALEDIR(VARIABLE)\n# -------------------------------\n# Define VARIABLE to the location where catalog files will\n# be installed by po/Makefile.\nglib_DEFUN([GLIB_DEFINE_LOCALEDIR],\n[glib_REQUIRE([GLIB_GNU_GETTEXT])dnl\nglib_save_prefix=\"$prefix\"\nglib_save_exec_prefix=\"$exec_prefix\"\ntest \"x$prefix\" = xNONE && prefix=$ac_default_prefix\ntest \"x$exec_prefix\" = xNONE && exec_prefix=$prefix\nif test \"x$CATOBJEXT\" = \"x.mo\" ; then\n  localedir=`eval echo \"${libdir}/locale\"`\nelse\n  localedir=`eval echo \"${datadir}/locale\"`\nfi\nprefix=\"$glib_save_prefix\"\nexec_prefix=\"$glib_save_exec_prefix\"\nAC_DEFINE_UNQUOTED($1, \"$localedir\",\n  [Define the location where the catalogs will be installed])\n])\n\ndnl\ndnl Now the definitions that aclocal will find\ndnl\nifdef(glib_configure_in,[],[\nAC_DEFUN([AM_GLIB_GNU_GETTEXT],[GLIB_GNU_GETTEXT($@)])\nAC_DEFUN([AM_GLIB_DEFINE_LOCALEDIR],[GLIB_DEFINE_LOCALEDIR($@)])\n])dnl\n"
  },
  {
    "path": "m4/python.m4",
    "content": "## this one is commonly used with AM_PATH_PYTHONDIR ...\ndnl AM_CHECK_PYMOD(MODNAME [,SYMBOL [,ACTION-IF-FOUND [,ACTION-IF-NOT-FOUND]]])\ndnl Check if a module containing a given symbol is visible to python.\nAC_DEFUN([AM_CHECK_PYMOD],\n[AC_REQUIRE([AM_PATH_PYTHON])\npy_mod_var=`echo $1['_']$2 | sed 'y%./+-%__p_%'`\nAC_MSG_CHECKING(for ifelse([$2],[],,[$2 in ])python module $1)\nAC_CACHE_VAL(py_cv_mod_$py_mod_var, [\nifelse([$2],[], [prog=\"\nimport sys\ntry:\n        import $1\nexcept ImportError:\n        sys.exit(1)\nexcept:\n        sys.exit(0)\nsys.exit(0)\"], [prog=\"\nimport $1\n$1.$2\"])\nif $PYTHON -c \"$prog\" 1>&AC_FD_CC 2>&AC_FD_CC\n  then\n    eval \"py_cv_mod_$py_mod_var=yes\"\n  else\n    eval \"py_cv_mod_$py_mod_var=no\"\n  fi\n])\npy_val=`eval \"echo \\`echo '$py_cv_mod_'$py_mod_var\\`\"`\nif test \"x$py_val\" != xno; then\n  AC_MSG_RESULT(yes)\n  ifelse([$3], [],, [$3\n])dnl\nelse\n  AC_MSG_RESULT(no)\n  ifelse([$4], [],, [$4\n])dnl\nfi\n])\n\ndnl a macro to check for ability to create python extensions\ndnl  AM_CHECK_PYTHON_HEADERS([ACTION-IF-POSSIBLE], [ACTION-IF-NOT-POSSIBLE])\ndnl function also defines PYTHON_INCLUDES\nAC_DEFUN([AM_CHECK_PYTHON_HEADERS],\n[AC_REQUIRE([AM_PATH_PYTHON])\nAC_MSG_CHECKING(for headers required to compile python extensions)\ndnl deduce PYTHON_INCLUDES\npy_prefix=`$PYTHON -c \"import sys; print sys.prefix\"`\npy_exec_prefix=`$PYTHON -c \"import sys; print sys.exec_prefix\"`\nif test -x \"$PYTHON-config\"; then\nPYTHON_INCLUDES=`$PYTHON-config --includes 2>/dev/null`\nelse\nPYTHON_INCLUDES=\"-I${py_prefix}/include/python${PYTHON_VERSION}\"\nif test \"$py_prefix\" != \"$py_exec_prefix\"; then\n  PYTHON_INCLUDES=\"$PYTHON_INCLUDES -I${py_exec_prefix}/include/python${PYTHON_VERSION}\"\nfi\nfi\nAC_SUBST(PYTHON_INCLUDES)\ndnl check if the headers exist:\nsave_CPPFLAGS=\"$CPPFLAGS\"\nCPPFLAGS=\"$CPPFLAGS $PYTHON_INCLUDES\"\nAC_TRY_CPP([#include 
<Python.h>],dnl\n[AC_MSG_RESULT(found)\n$1],dnl\n[AC_MSG_RESULT(not found)\n$2])\nCPPFLAGS=\"$save_CPPFLAGS\"\n])\n"
  },
  {
    "path": "notification-server/.golangci.yml",
    "content": "run:\n  timeout: 2m\n\nlinters:\n  enable:\n   - govet\n   - gocyclo\n   - gosimple\n   - ineffassign\n   - staticcheck\n   - unused\n   - gofmt\n  disable:\n   - errcheck\n"
  },
  {
    "path": "notification-server/ccnet.conf",
    "content": "[Database]\nENGINE = mysql\nHOST = 127.0.0.1\nUSER = seafile\nPASSWD = seafile\nDB = ccnet-db\nCREATE_TABLES=true\n"
  },
  {
    "path": "notification-server/client.go",
    "content": "package main\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"runtime/debug\"\n\t\"time\"\n\n\tjwt \"github.com/golang-jwt/jwt/v5\"\n\t\"github.com/gorilla/websocket\"\n\tlog \"github.com/sirupsen/logrus\"\n)\n\nconst (\n\twriteWait = 1 * time.Second\n\tpongWait  = 5 * time.Second\n\t// Send pings to peer with this period. Must be less than pongWait.\n\tpingPeriod = 1 * time.Second\n\n\tcheckTokenPeriod = 1 * time.Hour\n)\n\n// Message is the message communicated between clients and server.\ntype Message struct {\n\tType    string          `json:\"type\"`\n\tContent json.RawMessage `json:\"content\"`\n}\n\ntype SubList struct {\n\tRepos []Repo `json:\"repos\"`\n}\n\ntype UnsubList struct {\n\tRepos []Repo `json:\"repos\"`\n}\n\ntype Repo struct {\n\tRepoID string `json:\"id\"`\n\tToken  string `json:\"jwt_token\"`\n}\n\ntype myClaims struct {\n\tExp      int64  `json:\"exp\"`\n\tRepoID   string `json:\"repo_id\"`\n\tUserName string `json:\"username\"`\n\tjwt.RegisteredClaims\n}\n\nfunc (*myClaims) Valid() error {\n\treturn nil\n}\n\nfunc (client *Client) Close() {\n\tclient.conn.Close()\n}\n\nfunc RecoverWrapper(f func()) {\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\tlog.Printf(\"panic: %v\\n%s\", err, debug.Stack())\n\t\t}\n\t}()\n\n\tf()\n}\n\n// HandleMessages connects to the client to process message.\nfunc (client *Client) HandleMessages() {\n\t// Set keep alive.\n\tclient.conn.SetPongHandler(func(string) error {\n\t\tclient.Alive = time.Now()\n\t\treturn nil\n\t})\n\n\tclient.ConnCloser.AddRunning(4)\n\tgo RecoverWrapper(client.readMessages)\n\tgo RecoverWrapper(client.writeMessages)\n\tgo RecoverWrapper(client.checkTokenExpired)\n\tgo RecoverWrapper(client.keepAlive)\n\tclient.ConnCloser.Wait()\n\tclient.Close()\n\tUnregisterClient(client)\n\tfor id := range client.Repos {\n\t\tclient.unsubscribe(id)\n\t}\n}\n\nfunc (client *Client) readMessages() {\n\tconn := client.conn\n\tdefer func() 
{\n\t\tclient.ConnCloser.Done()\n\t}()\n\n\tfor {\n\t\tselect {\n\t\tcase <-client.ConnCloser.HasBeenClosed():\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\t\tvar msg Message\n\t\terr := conn.ReadJSON(&msg)\n\t\tif err != nil {\n\t\t\tclient.ConnCloser.Signal()\n\t\t\tlog.Debugf(\"failed to read json data from client: %s: %v\", client.Addr, err)\n\t\t\treturn\n\t\t}\n\n\t\terr = client.handleMessage(&msg)\n\t\tif err != nil {\n\t\t\tclient.ConnCloser.Signal()\n\t\t\tlog.Debugf(\"%v\", err)\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc checkToken(tokenString, repoID string) (string, int64, bool) {\n\tif len(tokenString) == 0 {\n\t\treturn \"\", -1, false\n\t}\n\tclaims := new(myClaims)\n\ttoken, err := jwt.ParseWithClaims(tokenString, claims, func(token *jwt.Token) (interface{}, error) {\n\t\treturn []byte(privateKey), nil\n\t})\n\tif err != nil {\n\t\treturn \"\", -1, false\n\t}\n\n\tif !token.Valid {\n\t\treturn \"\", -1, false\n\t}\n\n\tnow := time.Now()\n\tif claims.RepoID != repoID || claims.Exp <= now.Unix() {\n\t\treturn \"\", -1, false\n\t}\n\n\treturn claims.UserName, claims.Exp, true\n}\n\nfunc (client *Client) handleMessage(msg *Message) error {\n\tcontent := msg.Content\n\n\tif msg.Type == \"subscribe\" {\n\t\tvar list SubList\n\t\terr := json.Unmarshal(content, &list)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, repo := range list.Repos {\n\t\t\tuser, exp, valid := checkToken(repo.Token, repo.RepoID)\n\t\t\tif !valid {\n\t\t\t\tclient.notifJWTExpired(repo.RepoID)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tclient.subscribe(repo.RepoID, user, exp)\n\t\t}\n\t} else if msg.Type == \"unsubscribe\" {\n\t\tvar list UnsubList\n\t\terr := json.Unmarshal(content, &list)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, r := range list.Repos {\n\t\t\tclient.unsubscribe(r.RepoID)\n\t\t}\n\t} else {\n\t\terr := fmt.Errorf(\"recv unexpected type of message: %s\", msg.Type)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// subscribe subscribes to notifications of repos.\nfunc 
(client *Client) subscribe(repoID, user string, exp int64) {\n\tclient.User = user\n\n\tclient.ReposMutex.Lock()\n\tclient.Repos[repoID] = exp\n\tclient.ReposMutex.Unlock()\n\n\tsubMutex.Lock()\n\tsubscribers, ok := subscriptions[repoID]\n\tif !ok {\n\t\tsubscribers = newSubscribers(client)\n\t\tsubscriptions[repoID] = subscribers\n\t}\n\tsubMutex.Unlock()\n\n\tsubscribers.Mutex.Lock()\n\tsubscribers.Clients[client.ID] = client\n\tsubscribers.Mutex.Unlock()\n}\n\nfunc (client *Client) unsubscribe(repoID string) {\n\tclient.ReposMutex.Lock()\n\tdelete(client.Repos, repoID)\n\tclient.ReposMutex.Unlock()\n\n\tsubMutex.Lock()\n\tsubscribers, ok := subscriptions[repoID]\n\tif !ok {\n\t\tsubMutex.Unlock()\n\t\treturn\n\t}\n\tsubMutex.Unlock()\n\n\tsubscribers.Mutex.Lock()\n\tdelete(subscribers.Clients, client.ID)\n\tsubscribers.Mutex.Unlock()\n\n}\n\nfunc (client *Client) writeMessages() {\n\tdefer func() {\n\t\tclient.ConnCloser.Done()\n\t}()\n\n\tfor {\n\t\tselect {\n\t\tcase msg := <-client.WCh:\n\t\t\tclient.conn.SetWriteDeadline(time.Now().Add(writeWait))\n\t\t\tclient.connMutex.Lock()\n\t\t\terr := client.conn.WriteJSON(msg)\n\t\t\tclient.connMutex.Unlock()\n\t\t\tif err != nil {\n\t\t\t\tclient.ConnCloser.Signal()\n\t\t\t\tlog.Debugf(\"failed to send notification to client: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tm, _ := msg.(*Message)\n\t\t\tlog.Debugf(\"send %s event to client %s(%d): %s\", m.Type, client.User, client.ID, string(m.Content))\n\t\tcase <-client.ConnCloser.HasBeenClosed():\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (client *Client) keepAlive() {\n\tdefer func() {\n\t\tclient.ConnCloser.Done()\n\t}()\n\n\tticker := time.NewTicker(pingPeriod)\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tif time.Since(client.Alive) > pongWait {\n\t\t\t\tclient.ConnCloser.Signal()\n\t\t\t\tlog.Debugf(\"disconnected because no pong was received for more than %v\", 
pongWait)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tclient.conn.SetWriteDeadline(time.Now().Add(writeWait))\n\t\t\tclient.connMutex.Lock()\n\t\t\terr := client.conn.WriteMessage(websocket.PingMessage, nil)\n\t\t\tclient.connMutex.Unlock()\n\t\t\tif err != nil {\n\t\t\t\tclient.ConnCloser.Signal()\n\t\t\t\tlog.Debugf(\"failed to send ping message to client: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\tcase <-client.ConnCloser.HasBeenClosed():\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (client *Client) checkTokenExpired() {\n\tdefer func() {\n\t\tclient.ConnCloser.Done()\n\t}()\n\n\tticker := time.NewTicker(checkTokenPeriod)\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\t// unsubscribe will delete repo from client.Repos, we'd better unsubscribe repos later.\n\t\t\tpendingRepos := make(map[string]struct{})\n\t\t\tnow := time.Now()\n\t\t\tclient.ReposMutex.Lock()\n\t\t\tfor repoID, exp := range client.Repos {\n\t\t\t\tif exp >= now.Unix() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tpendingRepos[repoID] = struct{}{}\n\t\t\t}\n\t\t\tclient.ReposMutex.Unlock()\n\n\t\t\tfor repoID := range pendingRepos {\n\t\t\t\tclient.unsubscribe(repoID)\n\t\t\t\tclient.notifJWTExpired(repoID)\n\t\t\t}\n\t\tcase <-client.ConnCloser.HasBeenClosed():\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (client *Client) notifJWTExpired(repoID string) {\n\tmsg := new(Message)\n\tmsg.Type = \"jwt-expired\"\n\tcontent := fmt.Sprintf(\"{\\\"repo_id\\\":\\\"%s\\\"}\", repoID)\n\tmsg.Content = []byte(content)\n\tclient.WCh <- msg\n}\n"
  },
  {
    "path": "notification-server/dup2.go",
    "content": "//go:build !(linux && arm64)\n\npackage main\n\nimport (\n\t\"syscall\"\n)\n\nfunc Dup(from, to int) error {\n\treturn syscall.Dup2(from, to)\n}\n"
  },
  {
    "path": "notification-server/dup3.go",
    "content": "//go:build linux && arm64\n\npackage main\n\nimport (\n\t\"syscall\"\n)\n\nfunc Dup(from, to int) error {\n\treturn syscall.Dup3(from, to, 0)\n}\n"
  },
  {
    "path": "notification-server/event.go",
    "content": "package main\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"reflect\"\n\t\"runtime/debug\"\n\t\"time\"\n\n\tlog \"github.com/sirupsen/logrus\"\n)\n\ntype RepoUpdateEvent struct {\n\tRepoID   string `json:\"repo_id\"`\n\tCommitID string `json:\"commit_id\"`\n}\n\ntype FileLockEvent struct {\n\tRepoID      string `json:\"repo_id\"`\n\tPath        string `json:\"path\"`\n\tChangeEvent string `json:\"change_event\"`\n\tLockUser    string `json:\"lock_user\"`\n}\n\ntype FolderPermEvent struct {\n\tRepoID      string `json:\"repo_id\"`\n\tPath        string `json:\"path\"`\n\tType        string `json:\"type\"`\n\tChangeEvent string `json:\"change_event\"`\n\tUser        string `json:\"user\"`\n\tGroup       int    `json:\"group\"`\n\tPerm        string `json:\"perm\"`\n}\n\ntype CommentEvent struct {\n\tRepoID   string `json:\"repo_id\"`\n\tType     string `json:\"type\"`\n\tFileUUID string `json:\"file_uuid\"`\n\tFilePath string `json:\"file_path\"`\n}\n\nfunc Notify(msg *Message) {\n\tvar repoID string\n\t// userList is the list of users who need to be notified, if it is nil, all subscribed users will be notified.\n\tvar userList map[string]struct{}\n\n\tcontent := msg.Content\n\tswitch msg.Type {\n\tcase \"repo-update\":\n\t\tvar event RepoUpdateEvent\n\t\terr := json.Unmarshal(content, &event)\n\t\tif err != nil {\n\t\t\tlog.Warn(err)\n\t\t\treturn\n\t\t}\n\t\trepoID = event.RepoID\n\tcase \"file-lock-changed\":\n\t\tvar event FileLockEvent\n\t\terr := json.Unmarshal(content, &event)\n\t\tif err != nil {\n\t\t\tlog.Warn(err)\n\t\t\treturn\n\t\t}\n\t\trepoID = event.RepoID\n\tcase \"folder-perm-changed\":\n\t\tvar event FolderPermEvent\n\t\terr := json.Unmarshal(content, &event)\n\t\tif err != nil {\n\t\t\tlog.Warn(err)\n\t\t\treturn\n\t\t}\n\t\trepoID = event.RepoID\n\t\tif event.User != \"\" {\n\t\t\tuserList = make(map[string]struct{})\n\t\t\tuserList[event.User] = struct{}{}\n\t\t} else if event.Group != -1 {\n\t\t\tuserList = 
getGroupMembers(event.Group)\n\t\t}\n\tcase \"comment-update\":\n\t\tvar event CommentEvent\n\t\terr := json.Unmarshal(content, &event)\n\t\tif err != nil {\n\t\t\tlog.Warn(err)\n\t\t\treturn\n\t\t}\n\t\trepoID = event.RepoID\n\tdefault:\n\t\treturn\n\t}\n\n\tclients := make(map[uint64]*Client)\n\n\tsubMutex.RLock()\n\tsubscribers := subscriptions[repoID]\n\tif subscribers == nil {\n\t\tsubMutex.RUnlock()\n\t\treturn\n\t}\n\tsubMutex.RUnlock()\n\n\tsubscribers.Mutex.RLock()\n\tfor clientID, client := range subscribers.Clients {\n\t\tclients[clientID] = client\n\t}\n\tsubscribers.Mutex.RUnlock()\n\n\tgo func() {\n\t\tdefer func() {\n\t\t\tif err := recover(); err != nil {\n\t\t\t\tlog.Printf(\"panic: %v\\n%s\", err, debug.Stack())\n\t\t\t}\n\t\t}()\n\t\t// In order to avoid being blocked on a Client for a long time, it is necessary to write WCh in a non-blocking way,\n\t\t// and the waiting WCh needs to be blocked and processed after other Clients have finished writing.\n\t\tvalue := reflect.ValueOf(msg)\n\t\tvar branches []reflect.SelectCase\n\t\tfor _, client := range clients {\n\t\t\tif !needToNotif(userList, client.User) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbranch := reflect.SelectCase{Dir: reflect.SelectSend, Chan: reflect.ValueOf(client.WCh), Send: value}\n\t\t\tbranches = append(branches, branch)\n\t\t}\n\n\t\tfor len(branches) != 0 {\n\t\t\tindex, _, _ := reflect.Select(branches)\n\t\t\tbranches = append(branches[:index], branches[index+1:]...)\n\t\t}\n\t}()\n}\n\nfunc getGroupMembers(group int) map[string]struct{} {\n\tquery := `SELECT user_name FROM GroupUser WHERE group_id = ?`\n\tctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)\n\tdefer cancel()\n\tstmt, err := ccnetDB.PrepareContext(ctx, query)\n\tif err != nil {\n\t\tlog.Printf(\"failed to prepare sql: %s：%v\", query, err)\n\t\treturn nil\n\t}\n\tdefer stmt.Close()\n\n\trows, err := stmt.QueryContext(ctx, group)\n\tif err != nil {\n\t\tlog.Printf(\"failed to query sql: %v\", 
err)\n\t\treturn nil\n\t}\n\tdefer rows.Close()\n\n\tuserList := make(map[string]struct{})\n\tvar userName string\n\n\tfor rows.Next() {\n\t\tif err := rows.Scan(&userName); err == nil {\n\t\t\tuserList[userName] = struct{}{}\n\t\t}\n\t}\n\n\tif err := rows.Err(); err != nil {\n\t\tlog.Printf(\"failed to scan sql rows: %v\", err)\n\t\treturn nil\n\t}\n\n\treturn userList\n}\n\nfunc needToNotif(userList map[string]struct{}, user string) bool {\n\tif userList == nil {\n\t\treturn true\n\t}\n\n\t_, ok := userList[user]\n\treturn ok\n}\n"
  },
  {
    "path": "notification-server/go.mod",
    "content": "module github.com/haiwen/seafile-server/notification-server\n\ngo 1.17\n\nrequire (\n\tgithub.com/dgraph-io/ristretto v0.2.0\n\tgithub.com/go-sql-driver/mysql v1.5.0\n\tgithub.com/golang-jwt/jwt/v5 v5.2.2\n\tgithub.com/gorilla/mux v1.8.0\n\tgithub.com/gorilla/websocket v1.4.2\n\tgithub.com/sirupsen/logrus v1.9.3\n)\n\nrequire (\n\tgithub.com/cespare/xxhash/v2 v2.1.1 // indirect\n\tgithub.com/dustin/go-humanize v1.0.1 // indirect\n\tgithub.com/pkg/errors v0.9.1 // indirect\n\tgolang.org/x/sys v0.11.0 //indirect\n)\n"
  },
  {
    "path": "notification-server/go.sum",
    "content": "github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=\ngithub.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=\ngithub.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=\ngithub.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/dgraph-io/ristretto v0.2.0 h1:XAfl+7cmoUDWW/2Lx8TGZQjjxIQ2Ley9DSf52dru4WE=\ngithub.com/dgraph-io/ristretto v0.2.0/go.mod h1:8uBHCU/PBV4Ag0CJrP47b9Ofby5dqWNh4FicAdoqFNU=\ngithub.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y=\ngithub.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=\ngithub.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=\ngithub.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=\ngithub.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=\ngithub.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=\ngithub.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=\ngithub.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=\ngithub.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=\ngithub.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=\ngithub.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=\ngithub.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=\ngithub.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=\ngithub.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=\ngithub.com/pmezard/go-difflib v1.0.0 
h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=\ngithub.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=\ngithub.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=\ngithub.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=\ngithub.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=\ngithub.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=\ngithub.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=\ngithub.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=\ngithub.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=\ngithub.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=\ngithub.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=\ngithub.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=\ngolang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=\ngolang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\ngopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=\ngopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\n"
  },
  {
    "path": "notification-server/logger.go",
    "content": "package main\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\tlog \"github.com/sirupsen/logrus\"\n)\n\nconst (\n\ttimestampFormat = \"[2006-01-02 15:04:05] \"\n)\n\ntype LogFormatter struct{}\n\nfunc (f *LogFormatter) Format(entry *log.Entry) ([]byte, error) {\n\tlevelStr := entry.Level.String()\n\tif levelStr == \"fatal\" {\n\t\tlevelStr = \"ERROR\"\n\t} else {\n\t\tlevelStr = strings.ToUpper(levelStr)\n\t}\n\tlevel := fmt.Sprintf(\"[%s] \", levelStr)\n\tappName := \"\"\n\tif logToStdout {\n\t\tappName = \"[notification-server] \"\n\t}\n\tbuf := make([]byte, 0, len(appName)+len(timestampFormat)+len(level)+len(entry.Message)+1)\n\tif logToStdout {\n\t\tbuf = append(buf, appName...)\n\t}\n\tbuf = entry.Time.AppendFormat(buf, timestampFormat)\n\tbuf = append(buf, level...)\n\tbuf = append(buf, entry.Message...)\n\tbuf = append(buf, '\\n')\n\treturn buf, nil\n}\n"
  },
  {
    "path": "notification-server/server.go",
    "content": "package main\n\nimport (\n\t\"database/sql\"\n\t\"encoding/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"os\"\n\t\"os/signal\"\n\t\"path/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n\n\t_ \"github.com/go-sql-driver/mysql\"\n\tjwt \"github.com/golang-jwt/jwt/v5\"\n\t\"github.com/gorilla/mux\"\n\t\"github.com/gorilla/websocket\"\n\tlog \"github.com/sirupsen/logrus\"\n)\n\nvar configDir string\nvar logFile, absLogFile string\nvar privateKey string\nvar host string\nvar port uint32\nvar logFp *os.File\n\nvar ccnetDB *sql.DB\n\nvar logToStdout bool\n\nfunc init() {\n\tflag.StringVar(&configDir, \"c\", \"\", \"config directory\")\n\tflag.StringVar(&logFile, \"l\", \"\", \"log file path\")\n\n\tenv := os.Getenv(\"SEAFILE_LOG_TO_STDOUT\")\n\tif env == \"true\" {\n\t\tlogToStdout = true\n\t}\n\n\tlog.SetFormatter(&LogFormatter{})\n}\n\nfunc loadNotifConfig() {\n\thost = os.Getenv(\"NOTIFICATION_SERVER_HOST\")\n\tif host == \"\" {\n\t\thost = \"0.0.0.0\"\n\t}\n\n\tport = 8083\n\tif os.Getenv(\"NOTIFICATION_SERVER_PORT\") != \"\" {\n\t\ti, err := strconv.Atoi(os.Getenv(\"NOTIFICATION_SERVER_PORT\"))\n\t\tif err == nil {\n\t\t\tport = uint32(i)\n\t\t}\n\t}\n\n\tlogLevel := os.Getenv(\"NOTIFICATION_SERVER_LOG_LEVEL\")\n\tif logLevel == \"\" {\n\t\tlogLevel = \"info\"\n\t}\n\n\tlevel, err := log.ParseLevel(logLevel)\n\tif err != nil {\n\t\tlog.Info(\"use the default log level: info\")\n\t\tlog.SetLevel(log.InfoLevel)\n\t} else {\n\t\tlog.SetLevel(level)\n\t}\n}\n\nfunc loadCcnetDB() {\n\toption, err := loadDBOptionFromEnv()\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to load database from env: %v\", err)\n\t}\n\n\tvar dsn string\n\tif option.UnixSocket == \"\" {\n\t\tdsn = fmt.Sprintf(\"%s:%s@tcp(%s:%d)/%s?tls=%t&readTimeout=60s&writeTimeout=60s\", option.User, option.Password, option.Host, option.Port, option.CcnetDbName, option.UseTLS)\n\t} else {\n\t\tdsn = 
fmt.Sprintf(\"%s:%s@unix(%s)/%s?readTimeout=60s&writeTimeout=60s\", option.User, option.Password, option.UnixSocket, option.CcnetDbName)\n\t}\n\tccnetDB, err = sql.Open(\"mysql\", dsn)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to open database: %v\", err)\n\t}\n\tif err := ccnetDB.Ping(); err != nil {\n\t\tlog.Fatalf(\"Failed to connected to mysql: %v\", err)\n\t}\n\tccnetDB.SetConnMaxLifetime(5 * time.Minute)\n\tccnetDB.SetMaxOpenConns(8)\n\tccnetDB.SetMaxIdleConns(8)\n}\n\ntype DBOption struct {\n\tUser          string\n\tPassword      string\n\tHost          string\n\tPort          int\n\tCcnetDbName   string\n\tSeafileDbName string\n\tUnixSocket    string\n\tUseTLS        bool\n}\n\nfunc loadDBOptionFromEnv() (*DBOption, error) {\n\tuser := os.Getenv(\"SEAFILE_MYSQL_DB_USER\")\n\tif user == \"\" {\n\t\treturn nil, fmt.Errorf(\"failed to read SEAFILE_MYSQL_DB_USER\")\n\t}\n\tpassword := os.Getenv(\"SEAFILE_MYSQL_DB_PASSWORD\")\n\tif password == \"\" {\n\t\treturn nil, fmt.Errorf(\"failed to read SEAFILE_MYSQL_DB_PASSWORD\")\n\t}\n\thost := os.Getenv(\"SEAFILE_MYSQL_DB_HOST\")\n\tif host == \"\" {\n\t\treturn nil, fmt.Errorf(\"failed to read SEAFILE_MYSQL_DB_HOST\")\n\t}\n\tport := 3306\n\tportStr := os.Getenv(\"SEAFILE_MYSQL_DB_PORT\")\n\tif portStr != \"\" {\n\t\tp, _ := strconv.ParseUint(portStr, 10, 32)\n\t\tif p > 0 {\n\t\t\tport = int(p)\n\t\t}\n\t}\n\tccnetDbName := os.Getenv(\"SEAFILE_MYSQL_DB_CCNET_DB_NAME\")\n\tif ccnetDbName == \"\" {\n\t\tccnetDbName = \"ccnet_db\"\n\t\tlog.Infof(\"Failed to read SEAFILE_MYSQL_DB_CCNET_DB_NAME, use ccnet_db by default\")\n\t}\n\tseafileDbName := os.Getenv(\"SEAFILE_MYSQL_DB_SEAFILE_DB_NAME\")\n\tif seafileDbName == \"\" {\n\t\tseafileDbName = \"seafile_db\"\n\t\tlog.Infof(\"Failed to read SEAFILE_MYSQL_DB_SEAFILE_DB_NAME, use seafile_db by default\")\n\t}\n\n\tlog.Infof(\"Database: user = %s\", user)\n\tlog.Infof(\"Database: host = %s\", host)\n\tlog.Infof(\"Database: port = %d\", port)\n\tlog.Infof(\"Database: 
ccnet_db_name = %s\", ccnetDbName)\n\tlog.Infof(\"Database: seafile_db_name = %s\", seafileDbName)\n\n\toption := new(DBOption)\n\toption.User = user\n\toption.Password = password\n\toption.Host = host\n\toption.Port = port\n\toption.CcnetDbName = ccnetDbName\n\toption.SeafileDbName = seafileDbName\n\treturn option, nil\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tif configDir == \"\" {\n\t\tlog.Fatal(\"config directory must be specified.\")\n\t}\n\n\t_, err := os.Stat(configDir)\n\tif os.IsNotExist(err) {\n\t\tlog.Fatalf(\"config directory %s doesn't exist: %v.\", configDir, err)\n\t}\n\n\tif logToStdout {\n\t\t// Use default output (StdOut)\n\t} else if logFile == \"\" {\n\t\tabsLogFile = filepath.Join(configDir, \"notification-server.log\")\n\t\tfp, err := os.OpenFile(absLogFile, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Failed to open or create log file: %v\", err)\n\t\t}\n\t\tlogFp = fp\n\t\tlog.SetOutput(fp)\n\t} else if logFile != \"-\" {\n\t\tabsLogFile, err = filepath.Abs(logFile)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Failed to convert log file path to absolute path: %v\", err)\n\t\t}\n\t\tfp, err := os.OpenFile(absLogFile, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Failed to open or create log file: %v\", err)\n\t\t}\n\t\tlogFp = fp\n\t\tlog.SetOutput(fp)\n\t}\n\n\tif absLogFile != \"\" && !logToStdout {\n\t\tDup(int(logFp.Fd()), int(os.Stderr.Fd()))\n\t}\n\n\tif err := loadJwtPrivateKey(); err != nil {\n\t\tlog.Fatalf(\"Failed to read config: %v\", err)\n\t}\n\n\tloadNotifConfig()\n\tloadCcnetDB()\n\n\tInit()\n\n\tgo handleUser1Signal()\n\n\trouter := newHTTPRouter()\n\n\tlog.Info(\"notification server started.\")\n\n\tserver := new(http.Server)\n\tserver.Addr = fmt.Sprintf(\"%s:%d\", host, port)\n\tserver.Handler = router\n\n\terr = server.ListenAndServe()\n\tif err != nil {\n\t\tlog.Infof(\"notificationserver exiting: %v\", err)\n\t}\n}\n\nfunc loadJwtPrivateKey() error 
{\n\tprivateKey = os.Getenv(\"JWT_PRIVATE_KEY\")\n\tif privateKey == \"\" {\n\t\treturn fmt.Errorf(\"failed to read JWT_PRIVATE_KEY\")\n\t}\n\n\treturn nil\n}\n\nfunc handleUser1Signal() {\n\tsignalChan := make(chan os.Signal, 1)\n\tsignal.Notify(signalChan, syscall.SIGUSR1)\n\n\tfor {\n\t\t<-signalChan\n\t\tlogRotate()\n\t}\n}\n\nfunc logRotate() {\n\tif logToStdout {\n\t\treturn\n\t}\n\tfp, err := os.OpenFile(absLogFile, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to reopen notification log: %v\", err)\n\t}\n\tlog.SetOutput(fp)\n\tif logFp != nil {\n\t\tlogFp.Close()\n\t\tlogFp = fp\n\t}\n\n\tDup(int(logFp.Fd()), int(os.Stderr.Fd()))\n}\n\nfunc newHTTPRouter() *mux.Router {\n\tr := mux.NewRouter()\n\tr.Handle(\"/\", appHandler(messageCB))\n\tr.Handle(\"/events{slash:\\\\/?}\", appHandler(eventCB))\n\tr.Handle(\"/ping{slash:\\\\/?}\", appHandler(pingCB))\n\n\treturn r\n}\n\n// Any http request will be automatically upgraded to websocket.\nfunc messageCB(rsp http.ResponseWriter, r *http.Request) *appError {\n\tupgrader := newUpgrader()\n\tconn, err := upgrader.Upgrade(rsp, r, nil)\n\tif err != nil {\n\t\tlog.Warnf(\"failed to upgrade http to websocket: %v\", err)\n\t\t// Don't return eror here, because the upgrade fails, then Upgrade replies to the client with an HTTP error response.\n\t\treturn nil\n\t}\n\n\taddr := r.Header.Get(\"x-forwarded-for\")\n\tif addr == \"\" {\n\t\taddr = conn.RemoteAddr().String()\n\t}\n\tclient := NewClient(conn, addr)\n\tRegisterClient(client)\n\tclient.HandleMessages()\n\n\treturn nil\n}\n\nfunc eventCB(rsp http.ResponseWriter, r *http.Request) *appError {\n\tmsg := Message{}\n\n\ttoken := getAuthorizationToken(r.Header)\n\tif !checkAuthToken(token) {\n\t\treturn &appError{Error: nil,\n\t\t\tMessage: \"Notification token not match\",\n\t\t\tCode:    http.StatusBadRequest,\n\t\t}\n\t}\n\n\tbody, err := io.ReadAll(r.Body)\n\tif err != nil {\n\t\treturn &appError{Error: err,\n\t\t\tMessage: 
\"\",\n\t\t\tCode:    http.StatusInternalServerError,\n\t\t}\n\t}\n\n\tif err := json.Unmarshal(body, &msg); err != nil {\n\t\treturn &appError{Error: err,\n\t\t\tMessage: \"\",\n\t\t\tCode:    http.StatusInternalServerError,\n\t\t}\n\t}\n\n\tNotify(&msg)\n\n\treturn nil\n}\n\nfunc getAuthorizationToken(h http.Header) string {\n\tauth := h.Get(\"Authorization\")\n\tsplitResult := strings.Split(auth, \" \")\n\tif len(splitResult) > 1 {\n\t\treturn splitResult[1]\n\t}\n\treturn \"\"\n}\n\nfunc checkAuthToken(tokenString string) bool {\n\tif len(tokenString) == 0 {\n\t\treturn false\n\t}\n\tclaims := new(myClaims)\n\ttoken, err := jwt.ParseWithClaims(tokenString, claims, func(token *jwt.Token) (interface{}, error) {\n\t\treturn []byte(privateKey), nil\n\t})\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tif !token.Valid {\n\t\treturn false\n\t}\n\n\tnow := time.Now()\n\n\treturn claims.Exp > now.Unix()\n}\n\nfunc newUpgrader() *websocket.Upgrader {\n\tupgrader := &websocket.Upgrader{\n\t\tReadBufferSize:  4096,\n\t\tWriteBufferSize: 1024,\n\t\tCheckOrigin: func(r *http.Request) bool {\n\t\t\treturn true\n\t\t},\n\t}\n\n\treturn upgrader\n}\n\nfunc pingCB(rsp http.ResponseWriter, r *http.Request) *appError {\n\tfmt.Fprintln(rsp, \"{\\\"ret\\\": \\\"pong\\\"}\")\n\treturn nil\n}\n\ntype appError struct {\n\tError   error\n\tMessage string\n\tCode    int\n}\n\ntype appHandler func(http.ResponseWriter, *http.Request) *appError\n\nfunc (fn appHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\te := fn(w, r)\n\tif e != nil {\n\t\tif e.Error != nil && e.Code == http.StatusInternalServerError {\n\t\t\tlog.Infof(\"path %s internal server error: %v\\n\", r.URL.Path, e.Error)\n\t\t}\n\t\thttp.Error(w, e.Message, e.Code)\n\t}\n}\n"
  },
  {
    "path": "notification-server/subscriptions.go",
    "content": "package main\n\nimport (\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/dgraph-io/ristretto/z\"\n\t\"github.com/gorilla/websocket\"\n)\n\nconst (\n\tchanBufSize = 10\n)\n\n// clients is a map from client id to Client structs.\n// It contains all current connected clients. Each client is identified by 64-bit ID.\nvar clients map[uint64]*Client\nvar clientsMutex sync.RWMutex\n\n// Use atomic operation to increase this value.\nvar nextClientID uint64 = 1\n\n// subscriptions is a map from repo_id to Subscribers struct.\n// It's protected by rw mutex.\nvar subscriptions map[string]*Subscribers\nvar subMutex sync.RWMutex\n\n// Client contains information about a client.\n// Two go routines are associated with each client to handle message reading and writing.\n// Messages sent to the client have to be written into WCh, since only one go routine can write to a websocket connection.\ntype Client struct {\n\t// The ID of this client\n\tID uint64\n\t// Websocket connection.\n\tconn *websocket.Conn\n\t// Connections do not support concurrent writers. 
Protect write with a mutex.\n\tconnMutex sync.Mutex\n\n\t// WCh is used to write messages to a client.\n\t// The structs written into the channel will be converted to JSON and sent to client.\n\tWCh chan interface{}\n\n\t// Repos is the repos this client subscribed to.\n\tRepos      map[string]int64\n\tReposMutex sync.Mutex\n\t// Alive is the last time received pong.\n\tAlive      time.Time\n\tConnCloser *z.Closer\n\t// Addr is the address of client.\n\tAddr string\n\t// User is the user of client.\n\tUser string\n}\n\n// Subscribers contains the clients who subscribe to a repo's notifications.\ntype Subscribers struct {\n\t// Clients is a map from client id to Client struct, protected by rw mutex.\n\tClients map[uint64]*Client\n\tMutex   sync.RWMutex\n}\n\n// Init inits clients and subscriptions.\nfunc Init() {\n\tclients = make(map[uint64]*Client)\n\tsubscriptions = make(map[string]*Subscribers)\n}\n\n// NewClient creates a new client.\nfunc NewClient(conn *websocket.Conn, addr string) *Client {\n\tclient := new(Client)\n\tclient.ID = atomic.AddUint64(&nextClientID, 1)\n\tclient.conn = conn\n\tclient.WCh = make(chan interface{}, chanBufSize)\n\tclient.Repos = make(map[string]int64)\n\tclient.Alive = time.Now()\n\tclient.Addr = addr\n\tclient.ConnCloser = z.NewCloser(0)\n\n\treturn client\n}\n\n// Register adds the client to the list of clients.\nfunc RegisterClient(client *Client) {\n\tclientsMutex.Lock()\n\tclients[client.ID] = client\n\tclientsMutex.Unlock()\n}\n\n// Unregister deletes the client from the list of clients.\nfunc UnregisterClient(client *Client) {\n\tclientsMutex.Lock()\n\tdelete(clients, client.ID)\n\tclientsMutex.Unlock()\n}\n\nfunc newSubscribers(client *Client) *Subscribers {\n\tsubscribers := new(Subscribers)\n\tsubscribers.Clients = make(map[uint64]*Client)\n\tsubscribers.Clients[client.ID] = client\n\n\treturn subscribers\n}\n"
  },
  {
    "path": "pytest.ini",
    "content": "[pytest]\naddopts = -vv -s\nlog_format = %(asctime)s:%(name)s:%(levelname)s:%(message)s\nlog_date_format = %Y-%m-%d %H:%M:%S\n# log_cli_level = info"
  },
  {
    "path": "python/LICENSE.txt",
    "content": "\n                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      
form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. 
Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. 
You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. 
(Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "python/Makefile.am",
    "content": "SUBDIRS = seafile seaserv\n"
  },
  {
    "path": "python/seafile/Makefile.am",
    "content": "seafiledir=${pyexecdir}/seafile\n\nseafile_PYTHON = __init__.py rpcclient.py\n"
  },
  {
    "path": "python/seafile/__init__.py",
    "content": "from .rpcclient import SeafServerThreadedRpcClient as ServerThreadedRpcClient\n\nclass TaskType(object):\n    DOWNLOAD = 0\n    UPLOAD = 1\n"
  },
  {
    "path": "python/seafile/rpcclient.py",
    "content": "from pysearpc import searpc_func, SearpcError, NamedPipeClient\n\nclass SeafServerThreadedRpcClient(NamedPipeClient):\n\n    def __init__(self, pipe_path):\n        NamedPipeClient.__init__(self, pipe_path, \"seafserv-threaded-rpcserver\")\n\n    # repo manipulation\n    @searpc_func(\"string\", [\"string\", \"string\", \"string\", \"string\", \"int\", \"string\", \"string\"])\n    def seafile_create_repo(name, desc, owner_email, passwd, enc_version, pwd_hash_algo, pwd_hash_params):\n        pass\n    create_repo = seafile_create_repo\n\n    @searpc_func(\"string\", [\"string\", \"string\", \"string\", \"string\", \"string\", \"string\", \"string\", \"int\", \"string\", \"string\", \"string\"])\n    def seafile_create_enc_repo(repo_id, name, desc, owner_email, magic, random_key, salt, enc_version, pwd_hash, pwd_hash_algo, pwd_hash_params):\n        pass\n    create_enc_repo = seafile_create_enc_repo\n\n    @searpc_func(\"objlist\", [\"string\", \"int\", \"int\"])\n    def seafile_get_repos_by_id_prefix(id_prefix, start, limit):\n        pass\n    get_repos_by_id_prefix = seafile_get_repos_by_id_prefix\n\n    @searpc_func(\"object\", [\"string\"])\n    def seafile_get_repo(repo_id):\n        pass\n    get_repo = seafile_get_repo\n\n    @searpc_func(\"int\", [\"string\"])\n    def seafile_destroy_repo(repo_id):\n        pass\n    remove_repo = seafile_destroy_repo\n\n    @searpc_func(\"objlist\", [\"int\", \"int\", \"string\", \"int\"])\n    def seafile_get_repo_list(start, limit, order_by, ret_virt_repo):\n        pass\n    get_repo_list = seafile_get_repo_list\n\n    @searpc_func(\"int64\", [])\n    def seafile_count_repos():\n        pass\n    count_repos = seafile_count_repos\n\n    @searpc_func(\"int\", [\"string\", \"string\", \"string\", \"string\"])\n    def seafile_edit_repo(repo_id, name, description, user):\n        pass\n    edit_repo = seafile_edit_repo\n\n    @searpc_func(\"int\", [\"string\", \"string\"])\n    def 
seafile_is_repo_owner(user_id, repo_id):\n        pass\n    is_repo_owner = seafile_is_repo_owner\n\n    @searpc_func(\"int\", [\"string\", \"string\"])\n    def seafile_set_repo_owner(email, repo_id):\n        pass\n    set_repo_owner = seafile_set_repo_owner\n\n    @searpc_func(\"string\", [\"string\"])\n    def seafile_get_repo_owner(repo_id):\n        pass\n    get_repo_owner = seafile_get_repo_owner\n\n    @searpc_func(\"objlist\", [])\n    def seafile_get_orphan_repo_list():\n        pass\n    get_orphan_repo_list = seafile_get_orphan_repo_list\n\n    @searpc_func(\"objlist\", [\"string\", \"int\", \"int\", \"int\"])\n    def seafile_list_owned_repos(user_id, ret_corrupted, start, limit):\n        pass\n    list_owned_repos = seafile_list_owned_repos\n\n    @searpc_func(\"objlist\", [\"string\"])\n    def seafile_search_repos_by_name(name):\n        pass\n    search_repos_by_name = seafile_search_repos_by_name\n\n    @searpc_func(\"int64\", [\"string\"])\n    def seafile_server_repo_size(repo_id):\n        pass\n    server_repo_size = seafile_server_repo_size\n\n    @searpc_func(\"int\", [\"string\", \"string\"])\n    def seafile_repo_set_access_property(repo_id, role):\n        pass\n    repo_set_access_property = seafile_repo_set_access_property\n\n    @searpc_func(\"string\", [\"string\"])\n    def seafile_repo_query_access_property(repo_id):\n        pass\n    repo_query_access_property = seafile_repo_query_access_property\n\n    @searpc_func(\"int\",  [\"string\", \"string\", \"string\"])\n    def seafile_revert_on_server(repo_id, commit_id, user_name):\n        pass\n    revert_on_server = seafile_revert_on_server\n\n    @searpc_func(\"objlist\", [\"string\", \"string\", \"string\"])\n    def seafile_diff():\n        pass\n    get_diff = seafile_diff\n\n    @searpc_func(\"int\", [\"string\", \"string\", \"string\", \"string\", \"string\"])\n    def seafile_post_file(repo_id, tmp_file_path, parent_dir, filename, user):\n        pass\n    post_file = 
seafile_post_file\n\n    @searpc_func(\"int\", [\"string\", \"string\", \"string\", \"string\"])\n    def seafile_post_dir(repo_id, parent_dir, new_dir_name, user):\n        pass\n    post_dir = seafile_post_dir\n\n    @searpc_func(\"int\", [\"string\", \"string\", \"string\", \"string\"])\n    def seafile_post_empty_file(repo_id, parent_dir, filename, user):\n        pass\n    post_empty_file = seafile_post_empty_file\n\n    @searpc_func(\"int\", [\"string\", \"string\", \"string\", \"string\", \"string\", \"string\"])\n    def seafile_put_file(repo_id, tmp_file_path, parent_dir, filename, user, head_id):\n        pass\n    put_file = seafile_put_file\n\n    @searpc_func(\"int\", [\"string\", \"string\", \"string\", \"string\"])\n    def seafile_del_file(repo_id, parent_dir, filename, user):\n        pass\n    del_file = seafile_del_file\n\n    @searpc_func(\"int\", [\"string\", \"string\", \"string\"])\n    def seafile_batch_del_files(repo_id, filepaths, user):\n        pass\n    batch_del_files = seafile_batch_del_files\n\n    @searpc_func(\"object\", [\"string\", \"string\", \"string\", \"string\", \"string\", \"string\", \"string\", \"int\", \"int\"])\n    def seafile_copy_file(src_repo, src_dir, src_filename, dst_repo, dst_dir, dst_filename, user, need_progress, synchronous):\n        pass\n    copy_file = seafile_copy_file\n\n    @searpc_func(\"object\", [\"string\", \"string\", \"string\", \"string\", \"string\", \"string\", \"int\", \"string\", \"int\", \"int\"])\n    def seafile_move_file(src_repo, src_dir, src_filename, dst_repo, dst_dir, dst_filename, replace, user, need_progress, synchronous):\n        pass\n    move_file = seafile_move_file\n\n    @searpc_func(\"int\", [\"string\", \"string\", \"string\", \"string\", \"string\"])\n    def seafile_rename_file(repo_id, parent_dir, oldname, newname, user):\n        pass\n    rename_file = seafile_rename_file\n\n    @searpc_func(\"int\", [\"string\", \"string\"])\n    def 
seafile_is_valid_filename(repo_id, filename):\n        pass\n    is_valid_filename = seafile_is_valid_filename\n\n    @searpc_func(\"object\", [\"string\", \"int\", \"string\"])\n    def seafile_get_commit(repo_id, version, commit_id):\n        pass\n    get_commit = seafile_get_commit\n\n    @searpc_func(\"string\", [\"string\", \"string\", \"int\", \"int\"])\n    def seafile_list_file_blocks(repo_id, file_id, offset, limit):\n        pass\n    list_file_blocks = seafile_list_file_blocks\n\n    @searpc_func(\"objlist\", [\"string\", \"string\", \"int\", \"int\"])\n    def seafile_list_dir(repo_id, dir_id, offset, limit):\n        pass\n    list_dir = seafile_list_dir\n\n    @searpc_func(\"objlist\", [\"string\", \"string\", \"string\", \"string\", \"int\", \"int\"])\n    def list_dir_with_perm(repo_id, dir_path, dir_id, user, offset, limit):\n        pass\n\n    @searpc_func(\"int64\", [\"string\", \"int\", \"string\"])\n    def seafile_get_file_size(store_id, version, file_id):\n        pass\n    get_file_size = seafile_get_file_size\n\n    @searpc_func(\"int64\", [\"string\", \"int\", \"string\"])\n    def seafile_get_dir_size(store_id, version, dir_id):\n        pass\n    get_dir_size = seafile_get_dir_size\n\n    @searpc_func(\"objlist\", [\"string\", \"string\", \"string\"])\n    def seafile_list_dir_by_path(repo_id, commit_id, path):\n        pass\n    list_dir_by_path = seafile_list_dir_by_path\n\n    @searpc_func(\"string\", [\"string\", \"string\", \"string\"])\n    def seafile_get_dir_id_by_commit_and_path(repo_id, commit_id, path):\n        pass\n    get_dir_id_by_commit_and_path = seafile_get_dir_id_by_commit_and_path\n\n    @searpc_func(\"string\", [\"string\", \"string\"])\n    def seafile_get_file_id_by_path(repo_id, path):\n        pass\n    get_file_id_by_path = seafile_get_file_id_by_path\n\n    @searpc_func(\"string\", [\"string\", \"string\"])\n    def seafile_get_dir_id_by_path(repo_id, path):\n        pass\n    get_dir_id_by_path = 
seafile_get_dir_id_by_path\n\n    @searpc_func(\"string\", [\"string\", \"string\", \"string\"])\n    def seafile_get_file_id_by_commit_and_path(repo_id, commit_id, path):\n        pass\n    get_file_id_by_commit_and_path = seafile_get_file_id_by_commit_and_path\n\n    @searpc_func(\"object\", [\"string\", \"string\"])\n    def seafile_get_dirent_by_path(repo_id, commit_id, path):\n        pass\n    get_dirent_by_path = seafile_get_dirent_by_path\n\n    @searpc_func(\"objlist\", [\"string\", \"string\", \"string\", \"int\"])\n    def seafile_list_file_revisions(repo_id, commit_id, path, limit):\n        pass\n    list_file_revisions = seafile_list_file_revisions\n\n    @searpc_func(\"objlist\", [\"string\", \"string\"])\n    def seafile_calc_files_last_modified(repo_id, parent_dir, limit):\n        pass\n    calc_files_last_modified = seafile_calc_files_last_modified\n\n    @searpc_func(\"int\", [\"string\", \"string\", \"string\", \"string\"])\n    def seafile_revert_file(repo_id, commit_id, path, user):\n        pass\n    revert_file = seafile_revert_file\n\n    @searpc_func(\"string\", [\"string\", \"string\"])\n    def seafile_check_repo_blocks_missing(repo_id, blklist):\n        pass\n    check_repo_blocks_missing = seafile_check_repo_blocks_missing\n\n    @searpc_func(\"int\", [\"string\", \"string\", \"string\", \"string\"])\n    def seafile_revert_dir(repo_id, commit_id, path, user):\n        pass\n    revert_dir = seafile_revert_dir\n\n    @searpc_func(\"objlist\", [\"string\", \"int\", \"string\", \"string\", \"int\"])\n    def get_deleted(repo_id, show_days, path, scan_stat, limit):\n        pass\n\n    # share repo to user\n    @searpc_func(\"string\", [\"string\", \"string\", \"string\", \"string\"])\n    def seafile_add_share(repo_id, from_email, to_email, permission):\n        pass\n    add_share = seafile_add_share\n\n    @searpc_func(\"objlist\", [\"string\", \"string\", \"int\", \"int\"])\n    def seafile_list_share_repos(email, query_col, start, 
limit):\n        pass\n    list_share_repos = seafile_list_share_repos\n\n    @searpc_func(\"objlist\", [\"string\", \"string\"])\n    def seafile_list_repo_shared_to(from_user, repo_id):\n        pass\n    list_repo_shared_to = seafile_list_repo_shared_to\n\n    @searpc_func(\"string\", [\"string\", \"string\", \"string\", \"string\", \"string\", \"string\"])\n    def share_subdir_to_user(repo_id, path, owner, share_user, permission, passwd):\n        pass\n\n    @searpc_func(\"int\", [\"string\", \"string\", \"string\", \"string\"])\n    def unshare_subdir_for_user(repo_id, path, owner, share_user):\n        pass\n\n    @searpc_func(\"int\", [\"string\", \"string\", \"string\", \"string\", \"string\"])\n    def update_share_subdir_perm_for_user(repo_id, path, owner, share_user, permission):\n        pass\n\n    @searpc_func(\"object\", [\"string\", \"string\", \"string\", \"int\"])\n    def get_shared_repo_by_path(repo_id, path, shared_to, is_org):\n        pass\n\n    @searpc_func(\"objlist\", [\"int\", \"string\", \"string\", \"int\", \"int\"])\n    def seafile_list_org_share_repos(org_id, email, query_col, start, limit):\n        pass\n    list_org_share_repos = seafile_list_org_share_repos\n\n    @searpc_func(\"int\", [\"string\", \"string\", \"string\"])\n    def seafile_remove_share(repo_id, from_email, to_email):\n        pass\n    remove_share = seafile_remove_share\n\n    @searpc_func(\"int\", [\"string\", \"string\", \"string\", \"string\"])\n    def set_share_permission(repo_id, from_email, to_email, permission):\n        pass\n\n    # share repo to group\n    @searpc_func(\"int\", [\"string\", \"int\", \"string\", \"string\"])\n    def seafile_group_share_repo(repo_id, group_id, user_name, permisson):\n        pass\n    group_share_repo = seafile_group_share_repo\n\n    @searpc_func(\"int\", [\"string\", \"int\", \"string\"])\n    def seafile_group_unshare_repo(repo_id, group_id, user_name):\n        pass\n    group_unshare_repo = 
seafile_group_unshare_repo\n\n    @searpc_func(\"string\", [\"string\"])\n    def seafile_get_shared_groups_by_repo(repo_id):\n        pass\n    get_shared_groups_by_repo=seafile_get_shared_groups_by_repo\n\n    @searpc_func(\"objlist\", [\"string\", \"string\"])\n    def seafile_list_repo_shared_group(from_user, repo_id):\n        pass\n    list_repo_shared_group = seafile_list_repo_shared_group\n\n    @searpc_func(\"object\", [\"string\", \"string\", \"int\", \"int\"])\n    def get_group_shared_repo_by_path(repo_id, path, group_id, is_org):\n        pass\n\n    @searpc_func(\"objlist\", [\"string\"])\n    def get_group_repos_by_user (user):\n        pass\n\n    @searpc_func(\"objlist\", [\"string\", \"int\"])\n    def get_org_group_repos_by_user (user, org_id):\n        pass\n\n    @searpc_func(\"objlist\", [\"string\", \"string\", \"string\"])\n    def seafile_get_shared_users_for_subdir(repo_id, path, from_user):\n        pass\n    get_shared_users_for_subdir = seafile_get_shared_users_for_subdir\n\n    @searpc_func(\"objlist\", [\"string\", \"string\", \"string\"])\n    def seafile_get_shared_groups_for_subdir(repo_id, path, from_user):\n        pass\n    get_shared_groups_for_subdir = seafile_get_shared_groups_for_subdir\n\n    @searpc_func(\"string\", [\"string\", \"string\", \"string\", \"int\", \"string\", \"string\"])\n    def share_subdir_to_group(repo_id, path, owner, share_group, permission, passwd):\n        pass\n\n    @searpc_func(\"int\", [\"string\", \"string\", \"string\", \"int\"])\n    def unshare_subdir_for_group(repo_id, path, owner, share_group):\n        pass\n\n    @searpc_func(\"int\", [\"string\", \"string\", \"string\", \"int\", \"string\"])\n    def update_share_subdir_perm_for_group(repo_id, path, owner, share_group, permission):\n        pass\n\n    @searpc_func(\"string\", [\"int\"])\n    def seafile_get_group_repoids(group_id):\n        pass\n    get_group_repoids = seafile_get_group_repoids\n\n    @searpc_func(\"objlist\", 
[\"int\"])\n    def seafile_get_repos_by_group(group_id):\n        pass\n    get_repos_by_group = seafile_get_repos_by_group\n\n    @searpc_func(\"objlist\", [\"string\"])\n    def get_group_repos_by_owner(user_name):\n        pass\n\n    @searpc_func(\"string\", [\"string\"])\n    def get_group_repo_owner(repo_id):\n        pass\n\n    @searpc_func(\"int\", [\"int\", \"string\"])\n    def seafile_remove_repo_group(group_id, user_name):\n        pass\n    remove_repo_group = seafile_remove_repo_group\n\n    @searpc_func(\"int\", [\"int\", \"string\", \"string\"])\n    def set_group_repo_permission(group_id, repo_id, permission):\n        pass\n\n    # branch and commit\n    @searpc_func(\"objlist\", [\"string\"])\n    def seafile_branch_gets(repo_id):\n        pass\n    branch_gets = seafile_branch_gets\n\n    @searpc_func(\"objlist\", [\"string\", \"int\", \"int\"])\n    def seafile_get_commit_list(repo_id, offset, limit):\n        pass\n    get_commit_list = seafile_get_commit_list\n\n\n    ###### Token ####################\n\n    @searpc_func(\"int\", [\"string\", \"string\", \"string\"])\n    def seafile_set_repo_token(repo_id, email, token):\n        pass\n    set_repo_token = seafile_set_repo_token\n\n    @searpc_func(\"string\", [\"string\", \"string\"])\n    def seafile_get_repo_token_nonnull(repo_id, email):\n        \"\"\"Get the token of the repo for the email user. 
If the token does not\n        exist, a new one is generated and returned.\n\n        \"\"\"\n        pass\n    get_repo_token_nonnull = seafile_get_repo_token_nonnull\n\n\n    @searpc_func(\"string\", [\"string\", \"string\"])\n    def seafile_generate_repo_token(repo_id, email):\n        pass\n    generate_repo_token = seafile_generate_repo_token\n\n    @searpc_func(\"int\", [\"string\", \"string\"])\n    def seafile_delete_repo_token(repo_id, token, user):\n        pass\n    delete_repo_token = seafile_delete_repo_token\n\n    @searpc_func(\"objlist\", [\"string\"])\n    def seafile_list_repo_tokens(repo_id):\n        pass\n    list_repo_tokens = seafile_list_repo_tokens\n\n    @searpc_func(\"objlist\", [\"string\"])\n    def seafile_list_repo_tokens_by_email(email):\n        pass\n    list_repo_tokens_by_email = seafile_list_repo_tokens_by_email\n\n    @searpc_func(\"int\", [\"string\", \"string\"])\n    def seafile_delete_repo_tokens_by_peer_id(email, user_id):\n        pass\n    delete_repo_tokens_by_peer_id = seafile_delete_repo_tokens_by_peer_id\n\n    @searpc_func(\"int\", [\"string\"])\n    def delete_repo_tokens_by_email(email):\n        pass\n\n    ###### quota ##########\n    @searpc_func(\"int64\", [\"string\"])\n    def seafile_get_user_quota_usage(user_id):\n        pass\n    get_user_quota_usage = seafile_get_user_quota_usage\n\n    @searpc_func(\"int64\", [\"string\"])\n    def seafile_get_user_share_usage(user_id):\n        pass\n    get_user_share_usage = seafile_get_user_share_usage\n\n    @searpc_func(\"int64\", [\"int\"])\n    def seafile_get_org_quota_usage(org_id):\n        pass\n    get_org_quota_usage = seafile_get_org_quota_usage\n\n    @searpc_func(\"int64\", [\"int\", \"string\"])\n    def seafile_get_org_user_quota_usage(org_id, user):\n        pass\n    get_org_user_quota_usage = seafile_get_org_user_quota_usage\n\n    @searpc_func(\"int\", [\"string\", \"int64\"])\n    def set_user_quota(user, quota):\n        pass\n\n    
@searpc_func(\"int64\", [\"string\"])\n    def get_user_quota(user):\n        pass\n\n    @searpc_func(\"int\", [\"int\", \"int64\"])\n    def set_org_quota(org_id, quota):\n        pass\n\n    @searpc_func(\"int64\", [\"int\"])\n    def get_org_quota(org_id):\n        pass\n\n    @searpc_func(\"int\", [\"int\", \"string\", \"int64\"])\n    def set_org_user_quota(org_id, user, quota):\n        pass\n\n    @searpc_func(\"int64\", [\"int\", \"string\"])\n    def get_org_user_quota(org_id, user):\n        pass\n\n    @searpc_func(\"int\", [\"string\", \"int64\"])\n    def check_quota(repo_id, delta):\n        pass\n\n    @searpc_func(\"objlist\", [])\n    def list_user_quota_usage():\n        pass\n\n    # password management\n    @searpc_func(\"int\", [\"string\", \"string\"])\n    def seafile_check_passwd(repo_id, magic):\n        pass\n    check_passwd = seafile_check_passwd\n\n    @searpc_func(\"int\", [\"string\", \"string\", \"string\"])\n    def seafile_set_passwd(repo_id, user, passwd):\n        pass\n    set_passwd = seafile_set_passwd\n\n    @searpc_func(\"int\", [\"string\", \"string\"])\n    def seafile_unset_passwd(repo_id, user):\n        pass\n    unset_passwd = seafile_unset_passwd\n\n    # repo permission checking\n    @searpc_func(\"string\", [\"string\", \"string\"])\n    def check_permission(repo_id, user):\n        pass\n\n    # folder permission check\n    @searpc_func(\"string\", [\"string\", \"string\", \"string\"])\n    def check_permission_by_path(repo_id, path, user):\n        pass\n\n    # org repo\n    @searpc_func(\"string\", [\"string\", \"string\", \"string\", \"string\", \"string\", \"int\", \"int\"])\n    def seafile_create_org_repo(name, desc, user, passwd, magic, random_key, enc_version, org_id):\n        pass\n    create_org_repo = seafile_create_org_repo\n\n    @searpc_func(\"int\", [\"string\"])\n    def seafile_get_org_id_by_repo_id(repo_id):\n        pass\n    get_org_id_by_repo_id = seafile_get_org_id_by_repo_id\n\n    
@searpc_func(\"objlist\", [\"int\", \"int\", \"int\"])\n    def seafile_get_org_repo_list(org_id, start, limit):\n        pass\n    get_org_repo_list = seafile_get_org_repo_list\n\n    @searpc_func(\"int\", [\"int\"])\n    def seafile_remove_org_repo_by_org_id(org_id):\n        pass\n    remove_org_repo_by_org_id = seafile_remove_org_repo_by_org_id\n\n    @searpc_func(\"objlist\", [\"int\", \"string\"])\n    def list_org_repos_by_owner(org_id, user):\n        pass\n\n    @searpc_func(\"string\", [\"string\"])\n    def get_org_repo_owner(repo_id):\n        pass\n\n    # org group repo\n    @searpc_func(\"int\", [\"string\", \"int\", \"int\", \"string\", \"string\"])\n    def add_org_group_repo(repo_id, org_id, group_id, owner, permission):\n        pass\n\n    @searpc_func(\"int\", [\"string\", \"int\", \"int\"])\n    def del_org_group_repo(repo_id, org_id, group_id):\n        pass\n\n    @searpc_func(\"string\", [\"int\", \"int\"])\n    def get_org_group_repoids(org_id, group_id):\n        pass\n\n    @searpc_func(\"string\", [\"int\", \"int\", \"string\"])\n    def get_org_group_repo_owner(org_id, group_id, repo_id):\n        pass\n\n    @searpc_func(\"objlist\", [\"int\", \"string\"])\n    def get_org_group_repos_by_owner(org_id, user):\n        pass\n\n    @searpc_func(\"string\", [\"int\", \"string\"])\n    def get_org_groups_by_repo(org_id, repo_id):\n        pass\n\n    @searpc_func(\"int\", [\"int\", \"int\", \"string\", \"string\"])\n    def set_org_group_repo_permission(org_id, group_id, repo_id, permission):\n        pass\n\n    # inner pub repo\n    @searpc_func(\"int\", [\"string\", \"string\"])\n    def set_inner_pub_repo(repo_id, permission):\n        pass\n\n    @searpc_func(\"int\", [\"string\"])\n    def unset_inner_pub_repo(repo_id):\n        pass\n\n    @searpc_func(\"objlist\", [])\n    def list_inner_pub_repos():\n        pass\n\n    @searpc_func(\"objlist\", [\"string\"])\n    def list_inner_pub_repos_by_owner(user):\n        pass\n\n    
@searpc_func(\"int64\", [])\n    def count_inner_pub_repos():\n        pass\n\n    @searpc_func(\"int\", [\"string\"])\n    def is_inner_pub_repo(repo_id):\n        pass\n\n    # org inner pub repo\n    @searpc_func(\"int\", [\"int\", \"string\", \"string\"])\n    def set_org_inner_pub_repo(org_id, repo_id, permission):\n        pass\n\n    @searpc_func(\"int\", [\"int\", \"string\"])\n    def unset_org_inner_pub_repo(org_id, repo_id):\n        pass\n\n    @searpc_func(\"objlist\", [\"int\"])\n    def list_org_inner_pub_repos(org_id):\n        pass\n\n    @searpc_func(\"objlist\", [\"int\", \"string\"])\n    def list_org_inner_pub_repos_by_owner(org_id, user):\n        pass\n\n    @searpc_func(\"int\", [\"string\", \"int\"])\n    def set_repo_history_limit(repo_id, days):\n        pass\n\n    @searpc_func(\"int\", [\"string\"])\n    def get_repo_history_limit(repo_id):\n        pass\n\n    @searpc_func(\"int\", [\"string\", \"int64\"])\n    def set_repo_valid_since(repo_id, timestamp):\n        pass\n\n    # virtual repo\n    @searpc_func(\"string\", [\"string\", \"string\", \"string\", \"string\", \"string\", \"string\"])\n    def create_virtual_repo(origin_repo_id, path, repo_name, repo_desc, owner, passwd=''):\n        pass\n\n    @searpc_func(\"objlist\", [\"string\"])\n    def get_virtual_repos_by_owner(owner):\n        pass\n\n    @searpc_func(\"object\", [\"string\", \"string\", \"string\"])\n    def get_virtual_repo(origin_repo, path, owner):\n        pass\n\n    # system default library\n    @searpc_func(\"string\", [])\n    def get_system_default_repo_id():\n        pass\n\n    # Change password\n    @searpc_func(\"int\", [\"string\", \"string\", \"string\", \"string\"])\n    def seafile_change_repo_passwd(repo_id, old_passwd, new_passwd, user):\n        pass\n    change_repo_passwd = seafile_change_repo_passwd\n\n    # Upgrade repo enc algorithm\n    @searpc_func(\"int\", [\"string\", \"string\", \"string\", \"string\", \"string\"])\n    def 
seafile_upgrade_repo_pwd_hash_algorithm (repo_id, user, passwd, pwd_hash_algo, pwd_hash_params):\n        pass\n    upgrade_repo_pwd_hash_algorithm = seafile_upgrade_repo_pwd_hash_algorithm\n\n    # Clean trash\n    @searpc_func(\"int\", [\"string\", \"int\"])\n    def clean_up_repo_history(repo_id, keep_days):\n        pass\n\n    # Trashed repos\n    @searpc_func(\"objlist\", [\"int\", \"int\"])\n    def get_trash_repo_list(start, limit):\n        pass\n\n    @searpc_func(\"int\", [\"string\"])\n    def del_repo_from_trash(repo_id):\n        pass\n\n    @searpc_func(\"int\", [\"string\"])\n    def restore_repo_from_trash(repo_id):\n        pass\n\n    @searpc_func(\"objlist\", [\"string\"])\n    def get_trash_repos_by_owner(owner):\n        pass\n\n    @searpc_func(\"int\", [])\n    def empty_repo_trash():\n        pass\n\n    @searpc_func(\"int\", [\"string\"])\n    def empty_repo_trash_by_owner(owner):\n        pass\n\n    @searpc_func(\"object\", [\"string\"])\n    def empty_repo_trash_by_owner(owner):\n        pass\n\n    @searpc_func(\"object\", [\"int\", \"string\", \"string\"])\n    def generate_magic_and_random_key(enc_version, repo_id, password):\n        pass\n\n    @searpc_func(\"int64\", [])\n    def get_total_file_number():\n        pass\n\n    @searpc_func(\"int64\", [])\n    def get_total_storage():\n        pass\n\n    @searpc_func(\"object\", [\"string\", \"string\"])\n    def get_file_count_info_by_path(repo_id, path):\n        pass\n\n    @searpc_func(\"string\", [\"string\"])\n    def get_trash_repo_owner(repo_id):\n        pass\n\n    @searpc_func(\"int64\", [\"string\", \"string\"])\n    def seafile_get_upload_tmp_file_offset(repo_id, file_path):\n        pass\n    get_upload_tmp_file_offset = seafile_get_upload_tmp_file_offset\n\n    @searpc_func(\"int\", [\"string\", \"string\", \"string\", \"string\"])\n    def seafile_mkdir_with_parents (repo_id, parent_dir, relative_path, username):\n        pass\n    mkdir_with_parents = 
seafile_mkdir_with_parents\n\n    @searpc_func(\"int\", [\"string\", \"string\"])\n    def get_server_config_int (group, key):\n        pass\n\n    @searpc_func(\"int\", [\"string\", \"string\", \"int\"])\n    def set_server_config_int (group, key, value):\n        pass\n\n    @searpc_func(\"int64\", [\"string\", \"string\"])\n    def get_server_config_int64 (group, key):\n        pass\n\n    @searpc_func(\"int\", [\"string\", \"string\", \"int64\"])\n    def set_server_config_int64 (group, key, value):\n        pass\n\n    @searpc_func(\"string\", [\"string\", \"string\"])\n    def get_server_config_string (group, key):\n        pass\n\n    @searpc_func(\"int\", [\"string\", \"string\", \"string\"])\n    def set_server_config_string (group, key, value):\n        pass\n\n    @searpc_func(\"int\", [\"string\", \"string\"])\n    def get_server_config_boolean (group, key):\n        pass\n\n    @searpc_func(\"int\", [\"string\", \"string\", \"int\"])\n    def set_server_config_boolean (group, key, value):\n        pass\n\n    @searpc_func(\"int\", [\"string\", \"int\"])\n    def repo_has_been_shared (repo_id, including_groups):\n        pass\n\n    @searpc_func(\"objlist\", [\"string\"])\n    def get_shared_users_by_repo (repo_id):\n        pass\n\n    @searpc_func(\"objlist\", [\"int\", \"string\"])\n    def org_get_shared_users_by_repo (org_id, repo_id):\n        pass\n\n    @searpc_func(\"string\", [\"string\", \"string\", \"string\", \"int\"])\n    def convert_repo_path(repo_id, path, user, is_org):\n        pass\n\n    # repo status\n    @searpc_func(\"int\", [\"string\", \"int\"])\n    def set_repo_status(repo_id, status):\n        pass\n\n    @searpc_func(\"int\", [\"string\"])\n    def get_repo_status(repo_id):\n        pass\n\n    # token for web access to repo\n    @searpc_func(\"string\", [\"string\", \"string\", \"string\", \"string\", \"int\"])\n    def seafile_web_get_access_token(repo_id, obj_id, op, username, use_onetime=1):\n        pass\n    
web_get_access_token = seafile_web_get_access_token\n\n    @searpc_func(\"object\", [\"string\"])\n    def seafile_web_query_access_token(token):\n        pass\n    web_query_access_token = seafile_web_query_access_token\n\n    @searpc_func(\"string\", [\"string\"])\n    def seafile_query_zip_progress(token):\n        pass\n    query_zip_progress = seafile_query_zip_progress\n\n    @searpc_func(\"int\", [\"string\"])\n    def cancel_zip_task(token):\n        pass\n\n    ###### GC    ####################\n    @searpc_func(\"int\", [])\n    def seafile_gc():\n        pass\n    gc = seafile_gc\n\n    @searpc_func(\"int\", [])\n    def seafile_gc_get_progress():\n        pass\n    gc_get_progress = seafile_gc_get_progress\n\n    # password management\n    @searpc_func(\"int\", [\"string\", \"string\"])\n    def seafile_is_passwd_set(repo_id, user):\n        pass\n    is_passwd_set = seafile_is_passwd_set\n\n    @searpc_func(\"object\", [\"string\", \"string\"])\n    def seafile_get_decrypt_key(repo_id, user):\n        pass\n    get_decrypt_key = seafile_get_decrypt_key\n\n    # Copy tasks\n\n    @searpc_func(\"object\", [\"string\"])\n    def get_copy_task(task_id):\n        pass\n\n    @searpc_func(\"int\", [\"string\"])\n    def cancel_copy_task(task_id):\n        pass\n\n    # event\n    @searpc_func(\"int\", [\"string\", \"string\"])\n    def publish_event(channel, content):\n        pass\n\n    @searpc_func(\"json\", [\"string\"])\n    def pop_event(channel):\n        pass\n\n    @searpc_func(\"objlist\", [\"string\", \"string\"])\n    def search_files(self, repo_id, search_str):\n        pass\n\n    @searpc_func(\"objlist\", [\"string\", \"string\", \"string\"])\n    def search_files_by_path(self, repo_id, path, search_str):\n        pass\n\n    #user management\n    @searpc_func(\"int\", [\"string\", \"string\", \"int\", \"int\"])\n    def add_emailuser(self, email, passwd, is_staff, is_active):\n        pass\n\n    @searpc_func(\"int\", [\"string\", 
\"string\"])\n    def remove_emailuser(self, source, email):\n        pass\n\n    @searpc_func(\"int\", [\"string\", \"string\"])\n    def validate_emailuser(self, email, passwd):\n        pass\n\n    @searpc_func(\"object\", [\"string\"])\n    def get_emailuser(self, email):\n        pass\n\n    @searpc_func(\"object\", [\"string\"])\n    def get_emailuser_with_import(self, email):\n        pass\n\n    @searpc_func(\"object\", [\"int\"])\n    def get_emailuser_by_id(self, user_id):\n        pass\n\n    @searpc_func(\"objlist\", [\"string\", \"int\", \"int\", \"string\"])\n    def get_emailusers(self, source, start, limit, status):\n        pass\n\n    @searpc_func(\"objlist\", [\"string\", \"string\", \"int\", \"int\"])\n    def search_emailusers(self, source, email_patt, start, limit):\n        pass\n\n    @searpc_func(\"int64\", [\"string\"])\n    def count_emailusers(self, source):\n        pass\n\n    @searpc_func(\"int64\", [\"string\"])\n    def count_inactive_emailusers(self, source):\n        pass\n\n    @searpc_func(\"objlist\", [\"string\"])\n    def filter_emailusers_by_emails(self):\n        pass\n    \n    @searpc_func(\"int\", [\"string\", \"int\", \"string\", \"int\", \"int\"])\n    def update_emailuser(self, source, user_id, password, is_staff, is_active):\n        pass\n\n    @searpc_func(\"int\", [\"string\", \"string\"])\n    def update_role_emailuser(self, email, role):\n        pass\n\n    @searpc_func(\"objlist\", [])\n    def get_superusers(self):\n        pass\n\n    @searpc_func(\"objlist\", [\"string\", \"string\"])\n    def get_emailusers_in_list(self, source, user_list):\n        pass\n\n    @searpc_func(\"int\", [\"string\", \"string\"])\n    def update_emailuser_id (self, old_email, new_email):\n        pass\n\n    #group management\n    @searpc_func(\"int\", [\"string\", \"string\", \"string\", \"int\"])\n    def create_group(self, group_name, user_name, gtype, parent_group_id):\n        pass\n\n    @searpc_func(\"int\", [\"int\", 
\"string\", \"string\", \"int\"])\n    def create_org_group(self, org_id, group_name, user_name, parent_group_id):\n        pass\n\n    @searpc_func(\"int\", [\"int\"])\n    def remove_group(self, group_id):\n        pass\n\n    @searpc_func(\"int\", [\"int\", \"string\", \"string\"])\n    def group_add_member(self, group_id, user_name, member_name):\n        pass\n\n    @searpc_func(\"int\", [\"int\", \"string\", \"string\"])\n    def group_remove_member(self, group_id, user_name, member_name):\n        pass\n\n    @searpc_func(\"int\", [\"int\", \"string\"])\n    def group_set_admin(self, group_id, member_name):\n        pass\n\n    @searpc_func(\"int\", [\"int\", \"string\"])\n    def group_unset_admin(self, group_id, member_name):\n        pass\n\n    @searpc_func(\"int\", [\"int\", \"string\"])\n    def set_group_name(self, group_id, group_name):\n        pass\n\n    @searpc_func(\"int\", [\"int\", \"string\"])\n    def quit_group(self, group_id, user_name):\n        pass\n\n    @searpc_func(\"objlist\", [\"string\", \"int\"])\n    def get_groups(self, user_name, return_ancestors):\n        pass\n\n    @searpc_func(\"objlist\", [])\n    def list_all_departments(self):\n        pass\n\n    @searpc_func(\"objlist\", [\"int\", \"int\", \"string\"])\n    def get_all_groups(self, start, limit, source):\n        pass\n\n    @searpc_func(\"objlist\", [\"int\"])\n    def get_ancestor_groups(self, group_id):\n        pass\n\n    @searpc_func(\"objlist\", [\"int\"])\n    def get_top_groups(self, including_org):\n        pass\n\n    @searpc_func(\"objlist\", [\"int\"])\n    def get_child_groups(self, group_id):\n        pass\n\n    @searpc_func(\"objlist\", [\"int\"])\n    def get_descendants_groups(self, group_id):\n        pass\n\n    @searpc_func(\"object\", [\"int\"])\n    def get_group(self, group_id):\n        pass\n\n    @searpc_func(\"objlist\", [\"int\"])\n    def get_group_members(self, group_id):\n        pass\n\n    @searpc_func(\"objlist\", [\"int\", 
\"string\"])\n    def get_members_with_prefix(self, group_id, prefix):\n        pass\n\n    @searpc_func(\"int\", [\"int\", \"string\", \"int\"])\n    def check_group_staff(self, group_id, username, in_structure):\n        pass\n\n    @searpc_func(\"int\", [\"string\"])\n    def remove_group_user(self, username):\n        pass\n\n    @searpc_func(\"int\", [\"int\", \"string\", \"int\"])\n    def is_group_user(self, group_id, user, in_structure):\n        pass\n\n    @searpc_func(\"int\", [\"int\", \"string\"])\n    def set_group_creator(self, group_id, user_name):\n        pass\n\n    @searpc_func(\"objlist\", [\"string\", \"int\", \"int\"])\n    def search_groups(self, group_patt, start, limit):\n        pass\n\n    @searpc_func(\"objlist\", [\"int\", \"string\"])\n    def search_group_members(self, group_id, pattern):\n        pass\n\n    @searpc_func(\"objlist\", [\"string\"])\n    def get_groups_members(self, group_ids):\n        pass\n    #org management\n    @searpc_func(\"int\", [\"string\", \"string\", \"string\"])\n    def create_org(self, org_name, url_prefix, creator):\n        pass\n\n    @searpc_func(\"int\", [\"int\"])\n    def remove_org(self, org_id):\n        pass\n\n    @searpc_func(\"objlist\", [\"int\", \"int\"])\n    def get_all_orgs(self, start, limit):\n        pass\n\n    @searpc_func(\"int64\", [])\n    def count_orgs(self):\n        pass\n\n    @searpc_func(\"object\", [\"string\"])\n    def get_org_by_url_prefix(self, url_prefix):\n        pass\n\n    @searpc_func(\"object\", [\"string\"])\n    def get_org_by_id(self, org_id):\n        pass\n\n    @searpc_func(\"int\", [\"int\", \"string\", \"int\"])\n    def add_org_user(self, org_id, email, is_staff):\n        pass\n\n    @searpc_func(\"int\", [\"int\", \"string\"])\n    def remove_org_user(self, org_id, email):\n        pass\n\n    @searpc_func(\"objlist\", [\"string\"])\n    def get_orgs_by_user(self, email):\n        pass\n\n    @searpc_func(\"objlist\", [\"string\", \"int\", 
\"int\"])\n    def get_org_emailusers(self, url_prefix, start, limit):\n        pass\n\n    @searpc_func(\"int\", [\"int\", \"int\"])\n    def add_org_group(self, org_id, group_id):\n        pass\n\n    @searpc_func(\"int\", [\"int\", \"int\"])\n    def remove_org_group(self, org_id, group_id):\n        pass\n\n    @searpc_func(\"int\", [\"int\"])\n    def is_org_group(self, group_id):\n        pass\n\n    @searpc_func(\"int\", [\"int\"])\n    def get_org_id_by_group(self, group_id):\n        pass\n\n    @searpc_func(\"objlist\", [\"int\", \"int\", \"int\"])\n    def get_org_groups(self, org_id, start, limit):\n        pass\n\n    @searpc_func(\"objlist\", [\"string\", \"int\"])\n    def get_org_groups_by_user (self, user, org_id):\n        pass\n\n    @searpc_func(\"objlist\", [\"int\"])\n    def get_org_top_groups(self, org_id):\n        pass\n\n    @searpc_func(\"int\", [\"int\", \"string\"])\n    def org_user_exists(self, org_id, email):\n        pass\n\n    @searpc_func(\"int\", [\"int\", \"string\"])\n    def is_org_staff(self, org_id, user):\n        pass\n\n    @searpc_func(\"int\", [\"int\", \"string\"])\n    def set_org_staff(self, org_id, user):\n        pass\n\n    @searpc_func(\"int\", [\"int\", \"string\"])\n    def unset_org_staff(self, org_id, user):\n        pass\n\n    @searpc_func(\"int\", [\"int\", \"string\"])\n    def set_org_name(self, org_id, org_name):\n        pass\n\n    @searpc_func(\"string\", [\"string\"])\n    def get_primary_id(self, email):\n        pass\n"
  },
  {
    "path": "python/seaserv/Makefile.am",
    "content": "seaservdir=${pyexecdir}/seaserv\n\nseaserv_PYTHON = __init__.py service.py api.py\n"
  },
  {
    "path": "python/seaserv/__init__.py",
    "content": "from . import service\nfrom .service import seafserv_threaded_rpc, ccnet_threaded_rpc\nfrom .service import send_command, check_quota, web_get_access_token, \\\n    unset_repo_passwd, get_user_quota_usage, get_user_share_usage, \\\n    get_user_quota\nfrom .service import get_emailusers, count_emailusers, \\\n    get_emailuser_with_import\nfrom .service import get_org_groups, get_personal_groups_by_user, \\\n    get_group_repoids, get_personal_groups, list_share_repos, remove_share, \\\n    check_group_staff, remove_group_user, get_group, get_org_id_by_group, \\\n    get_group_members, get_shared_groups_by_repo, is_group_user, \\\n    get_org_group_repos, get_group_repos, get_org_groups_by_user, is_org_group,\\\n    del_org_group_repo, get_org_groups_by_repo, get_org_group_repoids, \\\n    get_group_repos_by_owner, unshare_group_repo\nfrom .service import get_repos, get_repo, get_commits, get_branches, remove_repo, \\\n    get_org_repos, is_repo_owner, create_org_repo, is_inner_pub_repo, \\\n    list_org_inner_pub_repos, get_org_id_by_repo_id, list_org_shared_repos, \\\n    list_personal_shared_repos, is_personal_repo, list_inner_pub_repos, \\\n    is_org_repo_owner, get_org_repo_owner, is_org_repo, get_file_size,\\\n    list_personal_repos_by_owner, get_repo_token_nonnull, get_repo_owner, \\\n    server_repo_size, get_file_id_by_path, get_commit, set_repo_history_limit,\\\n    get_repo_history_limit, list_inner_pub_repos_by_owner, unset_inner_pub_repo,\\\n    count_inner_pub_repos, edit_repo, list_dir_by_path, create_repo, remove_repo\n\nfrom .service import get_binding_peerids, is_valid_filename, check_permission,\\\n    is_passwd_set\nfrom .service import create_org, get_orgs_by_user, get_org_by_url_prefix, \\\n    get_user_current_org, add_org_user, remove_org_user, get_org_by_id, \\\n    get_org_id_by_repo_id, is_org_staff, get_org_users_by_url_prefix, \\\n    org_user_exists, list_org_repos_by_owner\n\nfrom .service import 
get_related_users_by_repo, get_related_users_by_org_repo\nfrom .service import post_empty_file, del_file\n\nfrom .service import \\\n    MAX_UPLOAD_FILE_SIZE, MAX_DOWNLOAD_DIR_SIZE, FILE_SERVER_ROOT, \\\n    CALC_SHARE_USAGE, FILE_SERVER_PORT, \\\n    SEAFILE_CENTRAL_CONF_DIR, USE_GO_FILESERVER\n\nfrom .service import send_message\n\nfrom .api import seafile_api, ccnet_api\n"
  },
  {
    "path": "python/seaserv/api.py",
    "content": "from .service import seafserv_threaded_rpc, ccnet_threaded_rpc\nfrom pysearpc import SearpcError\nimport json\n\n\"\"\"\nGeneral rules for return values and exception handling of Seafile python API:\n- Read operations return corresponding values. Raises exceptions on parameter errors\n  or I/O errors in seaf-server.\n- Write or set operations return 0 on success, -1 on error. On error, an exceptioin\n  will be raised.\n\nAll paths in parameters can be in absolute path format (like '/test') or\nrelative path format (like 'test'). The API can handle both formats.\n\"\"\"\n\nREPO_STATUS_NORMAL = 0\nREPO_STATUS_READ_ONLY = 1\n\nclass SeafileAPI(object):\n\n    def __init__(self):\n        pass\n\n    # fileserver token\n\n    def get_fileserver_access_token(self, repo_id, obj_id, op, username, use_onetime=True):\n        \"\"\"Generate token for access file/dir in fileserver\n\n        op: the operation, can be 'view', 'download', 'download-dir', 'downloadblks',\n            'upload', 'update', 'upload-blks-api', 'upload-blks-aj',\n            'update-blks-api', 'update-blks-aj'\n\n        Return: the access token in string\n        \"\"\"\n        onetime = 1 if bool(use_onetime) else 0\n        return seafserv_threaded_rpc.web_get_access_token(repo_id, obj_id, op, username,\n                                                          onetime)\n\n    def query_fileserver_access_token(self, token):\n        \"\"\"Get the WebAccess object\n\n        token: the access token in string\n\n        Return: the WebAccess object (lib/webaccess.vala)\n        \"\"\"\n        return seafserv_threaded_rpc.web_query_access_token(token)\n\n    def query_zip_progress(self, token):\n        \"\"\"Query zip progress for download-dir, download-multi\n        token: obtained by get_fileserver_access_token\n        Return: json formated string `{\"zipped\":, \"total\":}`, otherwise None.\n        \"\"\"\n        return seafserv_threaded_rpc.query_zip_progress(token)\n\n    
def cancel_zip_task(self, token):\n        return seafserv_threaded_rpc.cancel_zip_task(token)\n\n    # password\n\n    def is_password_set(self, repo_id, username):\n        \"\"\"\n        Return non-zero if True, otherwise 0.\n        \"\"\"\n        return seafserv_threaded_rpc.is_passwd_set(repo_id, username)\n\n    def get_decrypt_key(self, repo_id, username):\n        \"\"\"\n        Return: a CryptKey object (lib/crypt.vala)\n        \"\"\"\n        return seafserv_threaded_rpc.get_decrypt_key(repo_id, username)\n\n    def change_repo_passwd(self, repo_id, old_passwd, new_passwd, user):\n        return seafserv_threaded_rpc.change_repo_passwd(repo_id, old_passwd,\n                                                        new_passwd, user)\n\n    def upgrade_repo_pwd_hash_algorithm (self, repo_id, user, passwd, pwd_hash_algo, pwd_hash_params):\n        return seafserv_threaded_rpc.upgrade_repo_pwd_hash_algorithm (repo_id, user, passwd,\n                                                                      pwd_hash_algo, pwd_hash_params)\n    def check_passwd(self, repo_id, magic):\n        return seafserv_threaded_rpc.check_passwd(repo_id, magic)\n\n    def set_passwd(self, repo_id, user, passwd):\n        return seafserv_threaded_rpc.set_passwd(repo_id, user, passwd)\n\n    def unset_passwd(self, repo_id, user):\n        return seafserv_threaded_rpc.unset_passwd(repo_id, user)\n\n    def generate_magic_and_random_key(self, enc_version, repo_id, password):\n        return seafserv_threaded_rpc.generate_magic_and_random_key(enc_version, repo_id, password)\n\n    # repo manipulation\n\n    def create_repo(self, name, desc, username, passwd=None, enc_version=2, storage_id=None, pwd_hash_algo=None, pwd_hash_params=None):\n        return seafserv_threaded_rpc.create_repo(name, desc, username, passwd, enc_version, pwd_hash_algo, pwd_hash_params)\n\n    def create_enc_repo(self, repo_id, name, desc, username, magic, random_key, salt, enc_version, pwd_hash=None, 
pwd_hash_algo=None, pwd_hash_params=None):\n        return seafserv_threaded_rpc.create_enc_repo(repo_id, name, desc, username, magic, random_key, salt, enc_version, pwd_hash, pwd_hash_algo, pwd_hash_params)\n\n    def get_repos_by_id_prefix(self, id_prefix, start=-1, limit=-1):\n        \"\"\"\n        Return: a list of Repo objects\n        \"\"\"\n        return seafserv_threaded_rpc.get_repos_by_id_prefix(id_prefix,\n                                                            start, limit)\n    def get_repo(self, repo_id):\n        \"\"\"\n        Return: a Repo object (lib/repo.vala)\n        \"\"\"\n        return seafserv_threaded_rpc.get_repo(repo_id)\n\n    def remove_repo(self, repo_id):\n        return seafserv_threaded_rpc.remove_repo(repo_id)\n\n    def get_repo_list(self, start, limit, order_by=None, ret_virt_repo=False):\n        \"\"\"\n        Return: a list of Repo objects (lib/repo.vala)\n        \"\"\"\n        return seafserv_threaded_rpc.get_repo_list(start, limit, order_by, 1 if ret_virt_repo else 0)\n\n    def count_repos(self):\n        return seafserv_threaded_rpc.count_repos()\n\n    def edit_repo(self, repo_id, name, description, username):\n        return seafserv_threaded_rpc.edit_repo(repo_id, name, description, username)\n\n    def is_repo_owner(self, username, repo_id):\n        \"\"\"\n        Return 1 if True, otherwise 0.\n        \"\"\"\n        return seafserv_threaded_rpc.is_repo_owner(username, repo_id)\n\n    def set_repo_owner(self, email, repo_id):\n        return seafserv_threaded_rpc.set_repo_owner(email, repo_id)\n\n    def get_repo_owner(self, repo_id):\n        \"\"\"\n        Return: repo owner in string\n        \"\"\"\n        return seafserv_threaded_rpc.get_repo_owner(repo_id)\n\n    def get_owned_repo_list(self, username, ret_corrupted=False, start=-1, limit=-1):\n        \"\"\"\n        Return: a list of Repo objects\n        \"\"\"\n        return seafserv_threaded_rpc.list_owned_repos(username,\n              
                                        1 if ret_corrupted else 0,\n                                                      start, limit)\n\n    def search_repos_by_name(self, name):\n        return seafserv_threaded_rpc.search_repos_by_name(name)\n\n    def get_orphan_repo_list(self):\n        return seafserv_threaded_rpc.get_orphan_repo_list()\n\n    def get_repo_size(self, repo_id):\n        return seafserv_threaded_rpc.server_repo_size(repo_id)\n\n    def revert_repo(self, repo_id, commit_id, username):\n        return seafserv_threaded_rpc.revert_on_server(repo_id, commit_id, username)\n\n    def diff_commits(self, repo_id, old_commit, new_commit, fold_dir_diff = 1):\n        \"\"\"\n        Return: a list of DiffEntry objects (lib/repo.vala)\n        \"\"\"\n        return seafserv_threaded_rpc.get_diff(repo_id, old_commit, new_commit, fold_dir_diff)\n\n    def get_commit_list(self, repo_id, offset, limit):\n        \"\"\"\n        Return: a list of Commit objects (lib/commit.vala)\n        \"\"\"\n        return seafserv_threaded_rpc.get_commit_list(repo_id, offset, limit)\n\n    def get_commit(self, repo_id, repo_version, cmt_id):\n        \"\"\" Get a commit. 
\"\"\"\n        try:\n            ret = seafserv_threaded_rpc.get_commit(repo_id, repo_version, cmt_id)\n        except SearpcError:\n            ret = None\n        return ret\n\n    def get_system_default_repo_id (self):\n        return seafserv_threaded_rpc.get_system_default_repo_id()\n\n    def get_org_id_by_repo_id (self, repo_id):\n        return seafserv_threaded_rpc.get_org_id_by_repo_id(repo_id)\n\n    def set_repo_status (self, repo_id, status):\n        return seafserv_threaded_rpc.set_repo_status(repo_id, status)\n\n    def get_repo_status (self, repo_id):\n        return seafserv_threaded_rpc.get_repo_status(repo_id)\n\n    # File property and dir listing\n\n    def is_valid_filename(self, repo_id, filename):\n        \"\"\"\n        Return: 0 on invalid; 1 on valid.\n        \"\"\"\n        return seafserv_threaded_rpc.is_valid_filename(repo_id, filename)\n\n    def get_file_size(self, store_id, version, file_id):\n        return seafserv_threaded_rpc.get_file_size(store_id, version, file_id)\n\n    def get_dir_size(self, store_id, version, dir_id):\n        \"\"\"\n        Return the size of a dir. It needs to recursively calculate the size\n        of the dir. It can cause great delay before returning. Use with caution!\n        \"\"\"\n        return seafserv_threaded_rpc.get_dir_size(store_id, version, dir_id)\n\n    def get_file_id_by_path(self, repo_id, path):\n        \"\"\"\n        Returns None if path not found. 
Only raise exception on parameter or IO error.\n        \"\"\"\n        return seafserv_threaded_rpc.get_file_id_by_path(repo_id, path)\n\n    def get_file_id_by_commit_and_path(self, repo_id, commit_id, path):\n        return seafserv_threaded_rpc.get_file_id_by_commit_and_path(repo_id,\n                                                                    commit_id,\n                                                                    path)\n\n    def get_dirent_by_path(self, repo_id, path):\n        \"\"\"\n        Return: a Dirent object (lib/dirent.vala)\n        \"\"\"\n        return seafserv_threaded_rpc.get_dirent_by_path(repo_id, path)\n\n    def list_file_by_file_id(self, repo_id, file_id, offset=-1, limit=-1):\n        # deprecated, use list_blocks_by_file_id instead.\n        return seafserv_threaded_rpc.list_file_blocks(repo_id, file_id, offset, limit)\n\n    def list_blocks_by_file_id(self, repo_id, file_id, offset=-1, limit=-1):\n        \"\"\"\n        list block ids of a file.\n        Return: a string containing block list. Each id is seperated by '\\n'\n        \"\"\"\n        return seafserv_threaded_rpc.list_file_blocks(repo_id, file_id, offset, limit)\n    \n    def get_dir_id_by_path(self, repo_id, path):\n        return seafserv_threaded_rpc.get_dir_id_by_path(repo_id, path)\n\n    def list_dir_by_dir_id(self, repo_id, dir_id, offset=-1, limit=-1):\n        \"\"\"\n        Return: a list of Dirent objects. 
The objects are sorted as follows:\n                - Directories are always before files\n                - Entries are sorted by names in ascending order\n        \"\"\"\n        return seafserv_threaded_rpc.list_dir(repo_id, dir_id, offset, limit)\n\n    def list_dir_by_path(self, repo_id, path, offset=-1, limit=-1):\n        dir_id = seafserv_threaded_rpc.get_dir_id_by_path(repo_id, path)\n        if dir_id is None:\n            return None\n        return seafserv_threaded_rpc.list_dir(repo_id, dir_id, offset, limit)\n\n    def list_dir_by_commit_and_path(self, repo_id,\n                                    commit_id, path, offset=-1, limit=-1):\n        dir_id = seafserv_threaded_rpc.get_dir_id_by_commit_and_path(repo_id, commit_id, path)\n        if dir_id is None:\n            return None\n        return seafserv_threaded_rpc.list_dir(repo_id, dir_id, offset, limit)\n    \n    def get_dir_id_by_commit_and_path(self, repo_id, commit_id, path):\n        return seafserv_threaded_rpc.get_dir_id_by_commit_and_path(repo_id, commit_id, path)\n\n    def list_dir_with_perm(self, repo_id, dir_path, dir_id, user, offset=-1, limit=-1):\n        return seafserv_threaded_rpc.list_dir_with_perm (repo_id, dir_path, dir_id, user, offset, limit)\n\n    def mkdir_with_parents (self, repo_id, parent_dir, relative_path, username):\n        return seafserv_threaded_rpc.mkdir_with_parents(repo_id, parent_dir, relative_path, username)\n\n    def get_file_count_info_by_path(self, repo_id, path):\n        return seafserv_threaded_rpc.get_file_count_info_by_path(repo_id, path)\n\n    def get_total_storage (self):\n        return seafserv_threaded_rpc.get_total_storage()\n\n    def get_total_file_number (self):\n        return seafserv_threaded_rpc.get_total_file_number()\n\n    # file/dir operations\n\n    def post_file(self, repo_id, tmp_file_path, parent_dir, filename, username):\n        \"\"\"Add a file to a directory\"\"\"\n        return seafserv_threaded_rpc.post_file(repo_id, 
tmp_file_path, parent_dir,\n                                               filename, username)\n\n    def post_empty_file(self, repo_id, parent_dir, filename, username):\n        return seafserv_threaded_rpc.post_empty_file(repo_id, parent_dir,\n                                                     filename, username)\n\n    def put_file(self, repo_id, tmp_file_path, parent_dir, filename,\n                 username, head_id):\n        \"\"\"Update an existing file\n\n        head_id: the original commit id of the old file\n        \"\"\"\n        return seafserv_threaded_rpc.put_file(repo_id, tmp_file_path, parent_dir,\n                                              filename, username, head_id)\n\n    '''\n    If you want to delete multiple files in a batch, @filename should be json array\n    '''\n    def del_file(self, repo_id, parent_dir, filename, username):\n        return seafserv_threaded_rpc.del_file(repo_id, parent_dir, filename, username)\n\n    def batch_del_files(self, repo_id, filepaths, username):\n        return seafserv_threaded_rpc.batch_del_files(repo_id, filepaths, username)\n\n    '''\n    If you want to move or copy multiple files in a batch, @src_filename and @dst_filename\n    should be json array, make sure the number of files\n    in @src_filename and @dst_filename parameters match\n    '''\n    def copy_file(self, src_repo, src_dir, src_filename, dst_repo,\n                  dst_dir, dst_filename, username, need_progress, synchronous=0):\n        return seafserv_threaded_rpc.copy_file(src_repo, src_dir, src_filename,\n                                               dst_repo, dst_dir, dst_filename,\n                                               username, need_progress, synchronous)\n\n    def move_file(self, src_repo, src_dir, src_filename, dst_repo, dst_dir,\n                  dst_filename, replace, username, need_progress, synchronous=0):\n        return seafserv_threaded_rpc.move_file(src_repo, src_dir, src_filename,\n                      
                         dst_repo, dst_dir, dst_filename,\n                                               replace, username, need_progress, synchronous)\n\n    def get_copy_task(self, task_id):\n        return seafserv_threaded_rpc.get_copy_task(task_id)\n\n    def cancel_copy_task(self, task_id):\n        return seafserv_threaded_rpc.cancel_copy_task(task_id)\n\n    def rename_file(self, repo_id, parent_dir, oldname, newname, username):\n        return seafserv_threaded_rpc.rename_file(repo_id, parent_dir,\n                                                 oldname, newname, username)\n\n    def post_dir(self, repo_id, parent_dir, dirname, username):\n        \"\"\"Add a directory\"\"\"\n        return seafserv_threaded_rpc.post_dir(repo_id, parent_dir, dirname, username)\n\n    def revert_file(self, repo_id, commit_id, path, username):\n        return seafserv_threaded_rpc.revert_file(repo_id, commit_id, path, username)\n\n    def revert_dir(self, repo_id, commit_id, path, username):\n        return seafserv_threaded_rpc.revert_dir(repo_id, commit_id, path, username)\n\n    def get_deleted(self, repo_id, show_days, path='/', scan_stat=None, limit=100):\n        \"\"\"\n        Get list of deleted paths.\n\n        @show_days: return deleted path in the last @show_days\n        @path: return deleted files under this path. The path will be recursively traversed.\n        @scan_stat: An opaque status returned by the last call. In the first call, None\n                    must be passed. The last entry of the result list contains a 'scan_stat'\n                    attribute. In the next call, pass in the returned 'scan_stat'.\n        @limit: Advisory maximum number of commits to traverse. 
Sometimes more than @limit\n                commits will be traversed.\n\n        Return a list of DeletedEntry objects (lib/repo.vala).\n        If no more deleted entries can be returned within the given time frame (specified by\n        @show_days) or all deleted entries in the history have been returned, a list with a\n        single entry will be returned. The 'scan_stat' attribute of this entry is set to\n        None.\n        \"\"\"\n        return seafserv_threaded_rpc.get_deleted(repo_id, show_days, path, scan_stat, limit)\n\n    def get_file_revisions(self, repo_id, commit_id, path, limit):\n        \"\"\"\n        Get revisions of a file.\n\n        @commit_id: start traversing from this commit\n        @limit: maximum number of commits to traverse when looking for revisions\n\n        Return a list of Commit objects (lib/commit.vala) related to the revisions.\n        A few special attributes are added to the commit object:\n        @rev_file_id: id of the file revision\n        @rev_file_size: size of the file revision\n        @rev_renamed_old_path: set if this revision is made by a rename operation.\n                               It's set to the old path before rename.\n        @next_start_commit: commit_id for next page. 
An extra commit which only contains @next_start_commit\n                            will be appended to the list.\n        \"\"\"\n        return seafserv_threaded_rpc.list_file_revisions(repo_id, commit_id, path, limit)\n\n    # This api is slow and should only be used for version 0 repos.\n    def get_files_last_modified(self, repo_id, parent_dir, limit):\n        \"\"\"Get last modification time for files in a dir\n\n        limit: the max number of commits to analyze\n        \"\"\"\n        return seafserv_threaded_rpc.calc_files_last_modified(repo_id,\n                                                              parent_dir, limit)\n\n    def get_repo_history_limit(self, repo_id):\n        \"\"\"\n        Return repo history limit in days. Returns -1 if it's unlimited.\n        \"\"\"\n        return seafserv_threaded_rpc.get_repo_history_limit(repo_id)\n\n    def set_repo_history_limit(self, repo_id, days):\n        \"\"\"\n        Set repo history limit in days. Pass -1 if set to unlimited.\n        \"\"\"\n        return seafserv_threaded_rpc.set_repo_history_limit(repo_id, days)\n\n    def set_repo_valid_since(self, repo_id, timestamp):\n        return seafserv_threaded_rpc.set_repo_valid_since(repo_id, timestamp)\n\n    def check_repo_blocks_missing(self, repo_id, blklist):\n        return seafserv_threaded_rpc.check_repo_blocks_missing(repo_id, blklist)\n\n    def get_upload_tmp_file_offset (self, repo_id, file_path):\n        return seafserv_threaded_rpc.get_upload_tmp_file_offset (repo_id, file_path)\n\n    # file lock\n    def check_file_lock(self, repo_id, path, user):\n        \"\"\"\n        Always return 0 since CE doesn't support file locking.\n        \"\"\"\n        return 0\n\n    # share repo to user\n    def share_repo(self, repo_id, from_username, to_username, permission):\n        return seafserv_threaded_rpc.add_share(repo_id, from_username,\n                                               to_username, permission)\n\n    def 
remove_share(self, repo_id, from_username, to_username):\n        return seafserv_threaded_rpc.remove_share(repo_id, from_username,\n                                                  to_username)\n    \n    def set_share_permission(self, repo_id, from_username, to_username, permission):\n        return seafserv_threaded_rpc.set_share_permission(repo_id, from_username,\n                                                          to_username, permission)\n\n    def share_subdir_to_user(self, repo_id, path, owner, share_user, permission, passwd=''):\n        return seafserv_threaded_rpc.share_subdir_to_user(repo_id, path, owner,\n                                                          share_user, permission, passwd)\n\n    def unshare_subdir_for_user(self, repo_id, path, owner, share_user):\n        return seafserv_threaded_rpc.unshare_subdir_for_user(repo_id, path, owner,\n                                                             share_user)\n\n    def update_share_subdir_perm_for_user(self, repo_id, path, owner,\n                                          share_user, permission):\n        return seafserv_threaded_rpc.update_share_subdir_perm_for_user(repo_id, path, owner,\n                                                                       share_user, permission)\n\n    def get_shared_repo_by_path(self, repo_id, path, shared_to, is_org=False):\n        \"\"\"\n        If path is NULL, 'repo_id' represents for the repo we want,\n        otherwise, 'repo_id' represents for the origin repo, return virtual repo\n        \"\"\"\n        return seafserv_threaded_rpc.get_shared_repo_by_path(repo_id, path, shared_to, 1 if is_org else 0)\n\n    def get_share_out_repo_list(self, username, start, limit):\n        \"\"\"\n        Get repo list shared by this user.\n        Return: a list of Repo objects\n        \"\"\"\n        return seafserv_threaded_rpc.list_share_repos(username, \"from_email\",\n                                                      start, limit)\n\n    
def get_share_in_repo_list(self, username, start, limit):\n        \"\"\"\n        Get repo list shared to this user.\n        \"\"\"\n        return seafserv_threaded_rpc.list_share_repos(username, \"to_email\",\n                                                      start, limit)\n\n    def list_repo_shared_to(self, from_user, repo_id):\n        \"\"\"\n        Get user list this repo is shared to.\n        Return: a list of SharedUser objects (lib/repo.vala)\n        \"\"\"\n        return seafserv_threaded_rpc.list_repo_shared_to(from_user, repo_id)\n\n    def repo_has_been_shared(self, repo_id, including_groups=False):\n        return True if seafserv_threaded_rpc.repo_has_been_shared(repo_id, 1 if including_groups else 0) else False\n\n    # share repo to group\n    def group_share_repo(self, repo_id, group_id, username, permission):\n        # deprecated, use ``set_group_repo``\n        return seafserv_threaded_rpc.group_share_repo(repo_id, group_id,\n                                                      username, permission)\n\n    def set_group_repo(self, repo_id, group_id, username, permission):\n        return seafserv_threaded_rpc.group_share_repo(repo_id, group_id,\n                                                      username, permission)\n\n    def group_unshare_repo(self, repo_id, group_id, username):\n        # deprecated, use ``unset_group_repo``\n        return seafserv_threaded_rpc.group_unshare_repo(repo_id, group_id, username)\n\n    def unset_group_repo(self, repo_id, group_id, username):\n        return seafserv_threaded_rpc.group_unshare_repo(repo_id, group_id, username)\n\n    def get_shared_group_ids_by_repo(self, repo_id):\n        group_ids = seafserv_threaded_rpc.get_shared_groups_by_repo(repo_id)\n\n        if not group_ids:\n            return []\n\n        ret = []\n        for group_id in group_ids.split('\\n'):\n            if not group_id:\n                continue\n            ret.append(group_id)\n        return ret\n\n    def 
list_repo_shared_group(self, from_user, repo_id):\n        # deprecated, use list_repo_shared_group_by_user instead.\n        return seafserv_threaded_rpc.list_repo_shared_group(from_user, repo_id)\n\n    def get_group_shared_repo_by_path (self, repo_id, path, group_id, is_org=False):\n        \"\"\"\n        If path is NULL, 'repo_id' represents for the repo we want,\n        otherwise, 'repo_id' represents for the origin repo, return virtual repo\n        \"\"\"\n        return seafserv_threaded_rpc.get_group_shared_repo_by_path(repo_id, path, group_id, 1 if is_org else 0)\n\n    def get_group_repos_by_user (self, user):\n        \"\"\"\n        Return all the repos in all groups that the @user belongs to.\n        \"\"\"\n        return seafserv_threaded_rpc.get_group_repos_by_user(user)\n\n    def get_org_group_repos_by_user (self, user, org_id):\n        return seafserv_threaded_rpc.get_org_group_repos_by_user(user, org_id)\n\n    def list_repo_shared_group_by_user(self, from_user, repo_id):\n        \"\"\"\n        Return: a list of SharedGroup objects (lib/repo.vala)\n        \"\"\"\n        return seafserv_threaded_rpc.list_repo_shared_group(from_user, repo_id)\n\n    def share_subdir_to_group(self, repo_id, path, owner, share_group, permission, passwd=''):\n        return seafserv_threaded_rpc.share_subdir_to_group(repo_id, path, owner,\n                                                           share_group, permission, passwd)\n\n    def unshare_subdir_for_group(self, repo_id, path, owner, share_group):\n        return seafserv_threaded_rpc.unshare_subdir_for_group(repo_id, path, owner,\n                                                              share_group)\n\n    def update_share_subdir_perm_for_group(self, repo_id, path, owner,\n                                           share_group, permission):\n        return seafserv_threaded_rpc.update_share_subdir_perm_for_group(repo_id, path, owner,\n                                                            
            share_group, permission)\n\n    def get_group_repoids(self, group_id):\n        \"\"\"\n        Return the list of group repo ids\n        \"\"\"\n        repo_ids = seafserv_threaded_rpc.get_group_repoids(group_id)\n        if not repo_ids:\n            return []\n        l = []\n        for repo_id in repo_ids.split(\"\\n\"):\n            if repo_id == '':\n                continue\n            l.append(repo_id)\n        return l\n\n    def get_group_repo_list(self, group_id):\n        # deprecated, use get_repos_by_group instead.\n        ret = []\n        for repo_id in self.get_group_repoids(group_id):\n            r = self.get_repo(repo_id)\n            if r is None:\n                continue\n            ret.append(r)\n        return ret\n\n    def get_repos_by_group(self, group_id):\n        \"\"\"\n        Return: a list of Repo objects\n        \"\"\"\n        return seafserv_threaded_rpc.get_repos_by_group(group_id)\n\n    def get_group_repos_by_owner(self, username):\n        \"\"\"\n        Get all repos a user share to any group\n        Return: a list of Repo objects\n        \"\"\"\n        return seafserv_threaded_rpc.get_group_repos_by_owner(username)\n\n    def remove_group_repos_by_owner(self, group_id, username):\n        \"\"\"\n        Unshare all repos a user shared to a group.\n        \"\"\"\n        return seafserv_threaded_rpc.remove_repo_group(group_id, username)\n\n    def remove_group_repos(self, group_id):\n        \"\"\"\n        Remove all repos under group.\n        Return: 0 success; -1 failed\n        \"\"\"\n        return seafserv_threaded_rpc.remove_repo_group(group_id, None)\n\n    def set_group_repo_permission(self, group_id, repo_id, permission):\n        return seafserv_threaded_rpc.set_group_repo_permission(group_id, repo_id,\n                                                               permission)\n\n    def get_shared_users_for_subdir(self, repo_id, path, from_user):\n        \"\"\"\n        Get all users 
a path is shared to.\n        Return: a list of SharedUser objects.\n        \"\"\"\n        return seafserv_threaded_rpc.get_shared_users_for_subdir(repo_id, path, from_user)\n\n    def get_shared_groups_for_subdir(self, repo_id, path, from_user):\n        \"\"\"\n        Get all groups a path is shared to.\n        Return: a list of SharedGroup objects.\n        \"\"\"\n        return seafserv_threaded_rpc.get_shared_groups_for_subdir(repo_id, path, from_user)\n\n    def get_shared_users_by_repo(self, repo_id):\n        users = []\n        # get users that the repo is shared to\n        shared_users = seafserv_threaded_rpc.get_shared_users_by_repo (repo_id)\n        for user in shared_users:\n            users.append(user.user)\n\n        # get users in groups that the repo is shared to\n        group_ids = seafserv_threaded_rpc.get_shared_groups_by_repo(repo_id)\n        if not group_ids:\n            return users\n\n        ids = []\n        for group_id in group_ids.split('\\n'):\n            if not group_id:\n                continue\n            ids.append(int(group_id))\n\n        json_ids = json.dumps(ids)\n        group_users = ccnet_threaded_rpc.get_groups_members(json_ids)\n\n        for user in group_users:\n            if user.user_name not in users:\n                users.append(user.user_name)\n\n        return users\n\n    # organization wide repo\n    def add_inner_pub_repo(self, repo_id, permission):\n        return seafserv_threaded_rpc.set_inner_pub_repo(repo_id, permission)\n\n    def remove_inner_pub_repo(self, repo_id):\n        return seafserv_threaded_rpc.unset_inner_pub_repo(repo_id)\n\n    def get_inner_pub_repo_list(self):\n        \"\"\"\n        Return: a list of Repo objects.\n        \"\"\"\n        return seafserv_threaded_rpc.list_inner_pub_repos()\n\n    def list_inner_pub_repos_by_owner(self, repo_owner):\n        \"\"\"\n        Return: a list of Repo objects.\n        \"\"\"\n        return 
seafserv_threaded_rpc.list_inner_pub_repos_by_owner(repo_owner)\n\n    def count_inner_pub_repos(self):\n        return seafserv_threaded_rpc.count_inner_pub_repos()\n\n    def is_inner_pub_repo(self, repo_id):\n        return seafserv_threaded_rpc.is_inner_pub_repo(repo_id)\n\n    # permission checks\n    def check_permission(self, repo_id, user):\n        \"\"\"\n        Check repo share permissions. Only check user share, group share and inner-pub\n        shares.\n        Return: 'r', 'rw', or None\n        \"\"\"\n        return seafserv_threaded_rpc.check_permission(repo_id, user)\n\n    def check_permission_by_path(self, repo_id, path, user):\n        \"\"\"\n        Check both repo share permission and sub-folder access permissions.\n        This function should be used when updating file/folder in a repo.\n        In CE, this function is equivalent to check_permission.\n        Return: 'r', 'rw', or None\n        \"\"\"\n        return seafserv_threaded_rpc.check_permission_by_path(repo_id, path, user)\n\n    def is_repo_syncable(self, repo_id, user, repo_perm):\n        \"\"\"\n        Check if the permission of the repo is syncable.\n        \"\"\"\n        return '{\"is_syncable\":true}'\n\n    def is_dir_downloadable(self, repo_id, dir_path, user, repo_perm):\n        \"\"\"\n        Check if the permission of the dir is downloadable.\n        {\"is_downloadable\": false, \"undownloadable_path\":\"path\"}\n        - is_downloadable: true if the dir is downloadable, false if not.\n        - undownloadable_path: the undownloadable path of the repo if the path is not downloadable.\n        \"\"\"\n        return '{\"is_downloadable\":true}'\n\n    # token\n    def generate_repo_token(self, repo_id, username):\n        \"\"\"Generate a token for sync a repo\n        \"\"\"\n        return seafserv_threaded_rpc.generate_repo_token(repo_id, username)\n\n    def delete_repo_token(self, repo_id, token, user):\n        return 
seafserv_threaded_rpc.delete_repo_token(repo_id, token, user)\n\n    def list_repo_tokens(self, repo_id):\n        \"\"\"\n        Return: a list of RepoTokenInfo objects.\n        \"\"\"\n        return seafserv_threaded_rpc.list_repo_tokens(repo_id)\n\n    def list_repo_tokens_by_email(self, username):\n        return seafserv_threaded_rpc.list_repo_tokens_by_email(username)\n\n    def delete_repo_tokens_by_peer_id(self, email, peer_id):\n        return seafserv_threaded_rpc.delete_repo_tokens_by_peer_id(email, peer_id)\n\n    def delete_repo_tokens_by_email(self, email):\n        return seafserv_threaded_rpc.delete_repo_tokens_by_email(email)\n\n    # quota\n    def get_user_self_usage(self, username):\n        \"\"\"Get the sum of repos' size of the user\"\"\"\n        return seafserv_threaded_rpc.get_user_quota_usage(username)\n\n    def get_user_share_usage(self, username):\n        # sum (repo_size * number_of_shares)\n        return seafserv_threaded_rpc.get_user_share_usage(username)\n\n    def get_user_quota(self, username):\n        \"\"\"\n        Return: -2 if quota is unlimited; otherwise it must be number > 0.\n        \"\"\"\n        return seafserv_threaded_rpc.get_user_quota(username)\n\n    def set_user_quota(self, username, quota):\n        return seafserv_threaded_rpc.set_user_quota(username, quota)\n\n    def get_user_share_quota(self, username):\n        return -2               # unlimited\n\n    def set_user_share_quota(self, username, quota):\n        pass\n\n    def check_quota(self, repo_id, delta=0):\n        return seafserv_threaded_rpc.check_quota(repo_id, delta)\n\n    def list_user_quota_usage(self):\n        return seafserv_threaded_rpc.list_user_quota_usage()\n\n    # virtual repo\n    def create_virtual_repo(self, origin_repo_id, path, repo_name, repo_desc, owner, passwd=''):\n        return seafserv_threaded_rpc.create_virtual_repo(origin_repo_id,\n                                                         path,\n                   
                                      repo_name,\n                                                         repo_desc,\n                                                         owner,\n                                                         passwd)\n\n    def get_virtual_repos_by_owner(self, owner):\n        return seafserv_threaded_rpc.get_virtual_repos_by_owner(owner)\n\n    def get_virtual_repo(self, origin_repo, path, owner):\n        return seafserv_threaded_rpc.get_virtual_repo(origin_repo, path, owner)\n\n    # Clean trash\n\n    def clean_up_repo_history(self, repo_id, keep_days):\n        return seafserv_threaded_rpc.clean_up_repo_history(repo_id, keep_days)\n\n    # Trashed repos\n    def get_trash_repo_list(self, start, limit):\n        return seafserv_threaded_rpc.get_trash_repo_list(start, limit)\n\n    def del_repo_from_trash(self, repo_id):\n        return seafserv_threaded_rpc.del_repo_from_trash(repo_id)\n\n    def restore_repo_from_trash(self, repo_id):\n        return seafserv_threaded_rpc.restore_repo_from_trash(repo_id)\n\n    def get_trash_repos_by_owner(self, owner):\n        return seafserv_threaded_rpc.get_trash_repos_by_owner(owner)\n\n    def get_trash_repo_owner (self, repo_id):\n        return seafserv_threaded_rpc.get_trash_repo_owner(repo_id)\n\n    def empty_repo_trash(self):\n        return seafserv_threaded_rpc.empty_repo_trash()\n\n    def empty_repo_trash_by_owner(self, owner):\n        return seafserv_threaded_rpc.empty_repo_trash_by_owner(owner)\n\n    # Server config\n    def get_server_config_int (self, group, key):\n        return seafserv_threaded_rpc.get_server_config_int (group, key)\n\n    def set_server_config_int (self, group, key, value):\n        return seafserv_threaded_rpc.set_server_config_int (group, key, value)\n\n    def get_server_config_int64 (self, group, key):\n        return seafserv_threaded_rpc.get_server_config_int64 (group, key)\n\n    def set_server_config_int64 (self, group, key, value):\n        
return seafserv_threaded_rpc.set_server_config_int64 (group, key, value)\n\n    def get_server_config_string (self, group, key):\n        return seafserv_threaded_rpc.get_server_config_string (group, key)\n\n    def set_server_config_string (self, group, key, value):\n        return seafserv_threaded_rpc.set_server_config_string (group, key, value)\n\n    def get_server_config_boolean (self, group, key):\n        return bool(seafserv_threaded_rpc.get_server_config_boolean (group, key))\n\n    def set_server_config_boolean (self, group, key, value):\n        i_value = 1 if bool(value) else 0\n        return seafserv_threaded_rpc.set_server_config_boolean (group, key, i_value)\n\n    def del_org_group_repo(self, repo_id, org_id, group_id):\n        seafserv_threaded_rpc.del_org_group_repo(repo_id, org_id, group_id)\n\n    def org_get_shared_users_by_repo(self, org_id, repo_id):\n        users = []\n        # get users that the repo is shared to\n        shared_users = seafserv_threaded_rpc.org_get_shared_users_by_repo(org_id, repo_id)\n        for user in shared_users:\n            users.append(user.user)\n\n        # get users in groups that the repo is shared to\n        group_ids = seafserv_threaded_rpc.get_org_groups_by_repo(org_id, repo_id)\n        if not group_ids:\n            return users\n\n        ids = []\n        for group_id in group_ids.split('\\n'):\n            if not group_id:\n                continue\n            ids.append(int(group_id))\n\n        json_ids = json.dumps(ids)\n        group_users = ccnet_threaded_rpc.get_groups_members(json_ids)\n\n        for user in group_users:\n            if user.user_name not in users:\n                users.append(user.user_name)\n\n        return users\n\n    def list_org_inner_pub_repos(self, org_id):\n        return seafserv_threaded_rpc.list_org_inner_pub_repos(org_id)\n\n    def convert_repo_path(self, repo_id, path, user, is_org=False):\n        return seafserv_threaded_rpc.convert_repo_path(repo_id, 
path, user, 1 if is_org else 0)\n\n    def publish_event(self, channel, content):\n        return seafserv_threaded_rpc.publish_event(channel, content)\n\n    def pop_event(self, channel):\n        return seafserv_threaded_rpc.pop_event(channel)\n\n    def search_files(self, repo_id, search_str):\n        return seafserv_threaded_rpc.search_files(repo_id, search_str)\n\n    def search_files_by_path (self, repo_id, path, search_str):\n        return seafserv_threaded_rpc.search_files_by_path(repo_id, path, search_str)\n\nseafile_api = SeafileAPI()\n\nclass CcnetAPI(object):\n\n    def __init__(self):\n        pass\n\n    # user management\n    def add_emailuser(self, email, passwd, is_staff, is_active):\n        return ccnet_threaded_rpc.add_emailuser(email, passwd, is_staff, is_active)\n    \n    def remove_emailuser(self, source, email):\n        \"\"\"\n        source can be 'DB' or 'LDAP'.\n        - 'DB': remove a user created in local database\n        - 'LDAP': remove a user imported from LDAP\n        \"\"\"\n        return ccnet_threaded_rpc.remove_emailuser(source, email)\n    \n    def validate_emailuser(self, email, passwd):\n        \"\"\"\n        Verify user's password on login. 
Can be used to verify DB and LDAP users.\n        The function first verify password with LDAP, then local database.\n        \"\"\"\n        return ccnet_threaded_rpc.validate_emailuser(email, passwd)\n\n    def get_emailuser(self, email):\n        \"\"\"\n        Only return local database user or imported LDAP user.\n        It first lookup user from local database, if not found, lookup imported\n        LDAP user.\n        Return: a list of EmailUser objects (ccnet/lib/ccnetobj.vala)\n        The 'source' attribute of EmailUser object is set to 'LDAPImport' for LDAP\n        imported user, and 'DB' for local database user.\n        \"\"\"\n        return ccnet_threaded_rpc.get_emailuser(email)\n\n    def get_emailuser_with_import(self, email):\n        \"\"\"\n        The same as get_emailuser() but import the user from LDAP if it was not\n        imported yet.\n        \"\"\"\n        return ccnet_threaded_rpc.get_emailuser_with_import(email)\n\n    def get_emailuser_by_id(self, user_id):\n        \"\"\"\n        Get a user from local database with the db index id.\n        \"\"\"\n        return ccnet_threaded_rpc.get_emailuser_by_id(user_id)\n\n    def get_emailusers(self, source, start, limit, is_active=None):\n        \"\"\"\n        source:\n          - 'DB': return local db users\n          - 'LDAPImport': return imported LDAP users\n          - 'LDAP': retrieve users directly from LDAP server\n        start: offset to start retrieving, -1 to start from the beginning\n        limit: number of users to get, -1 to get all user from start\n        is_active: True to return only active users; False to return inactive users;\n                   None to return all users.\n        Return: a list of EmailUser objects.\n        \"\"\"\n        if is_active is True:\n            status = \"active\"       # list active users\n        elif is_active is False:\n            status = \"inactive\"     # list inactive users\n        else:\n            status = \"\"       
      # list all users\n\n        return ccnet_threaded_rpc.get_emailusers(source, start, limit, status)\n\n    def search_emailusers(self, source, email_patt, start, limit):\n        \"\"\"\n        Search for users whose name contains @email_patt.\n        source: 'DB' for local db users; 'LDAP' for imported LDAP users.\n                This function cannot search LDAP users directly in LDAP server.\n        \"\"\"\n        return ccnet_threaded_rpc.search_emailusers(source, email_patt, start, limit)\n\n    def search_groups(self, group_patt, start, limit):\n        \"\"\"\n        Search for groups whose name contains @group_patt.\n        \"\"\"\n        return ccnet_threaded_rpc.search_groups(group_patt, start, limit)\n    \n    def search_group_members(self, group_id, pattern):\n        return ccnet_threaded_rpc.search_group_members(group_id, pattern)\n\n    def get_top_groups(self, including_org=False):\n        return ccnet_threaded_rpc.get_top_groups(1 if including_org else 0)\n\n    def get_child_groups(self, group_id):\n        return ccnet_threaded_rpc.get_child_groups(group_id)\n\n    def get_descendants_groups(self, group_id):\n        return ccnet_threaded_rpc.get_descendants_groups(group_id)\n\n    def get_ancestor_groups(self, group_id):\n        return ccnet_threaded_rpc.get_ancestor_groups(group_id)\n\n    def count_emailusers(self, source):\n        \"\"\"\n        Return the number of active users by source.\n        source: 'DB' for local db users; 'LDAP' for imported LDAP users.\n        \"\"\"\n        return ccnet_threaded_rpc.count_emailusers(source)\n\n    def count_inactive_emailusers(self, source):\n        \"\"\"\n        Return the number of inactive users by source.\n        source: 'DB' for local db users; 'LDAP' for imported LDAP users.\n        \"\"\"\n        return ccnet_threaded_rpc.count_inactive_emailusers(source)\n\n    def update_emailuser(self, source, user_id, password, is_staff, is_active):\n        \"\"\"\n        
source: 'DB' for local db user; 'LDAP' for imported LDAP user.\n        user_id: usually not changed.\n        password: new password in plain text. Only effective for DB users.\n                  If '!' is passed, the password won't be updated.\n        is_staff: change superuser status\n        is_active: activate or deactivate user\n        \"\"\"\n        return ccnet_threaded_rpc.update_emailuser(source, user_id, password, is_staff, is_active)\n\n    def update_role_emailuser(self, email, role, is_manual_set=True):\n        return ccnet_threaded_rpc.update_role_emailuser(email, role)\n\n    def get_superusers(self):\n        \"\"\"\n        Return: a list of EmailUser objects.\n        \"\"\"\n        return ccnet_threaded_rpc.get_superusers()\n\n    def get_emailusers_in_list(self, source, user_list):\n        \"\"\"\n        @source: 'DB' or 'LDAP'\n        @user_list: json '[user1, user2, user3,...]'\n        \"\"\"\n        return ccnet_threaded_rpc.get_emailusers_in_list(source, user_list)\n\n    def update_emailuser_id (self, old_email, new_email):\n        return ccnet_threaded_rpc.update_emailuser_id (old_email, new_email)\n\n    # group management\n    def create_group(self, group_name, user_name, gtype=None, parent_group_id=0):\n        \"\"\"\n        For CE, gtype is not used and should always be None.\n        \"\"\"\n        return ccnet_threaded_rpc.create_group(group_name, user_name, gtype, parent_group_id)\n\n    def create_org_group(self, org_id, group_name, user_name, parent_group_id=0):\n        return ccnet_threaded_rpc.create_org_group(org_id, group_name, user_name, parent_group_id)\n    \n    def remove_group(self, group_id):\n        \"\"\"\n        permission check should be done before calling this function.\n        \"\"\"\n        return ccnet_threaded_rpc.remove_group(group_id)\n\n    def group_add_member(self, group_id, user_name, member_name):\n        \"\"\"\n        user_name: unused.\n        \"\"\"\n        return 
ccnet_threaded_rpc.group_add_member(group_id, user_name, member_name)\n    \n    def group_remove_member(self, group_id, user_name, member_name):\n        \"\"\"\n        user_name: unused.\n        \"\"\"\n        return ccnet_threaded_rpc.group_remove_member(group_id, user_name, member_name)\n\n    def group_set_admin(self, group_id, member_name):\n        \"\"\"\n        No effect if member_name is not in the group.\n        \"\"\"\n        return ccnet_threaded_rpc.group_set_admin(group_id, member_name)\n\n    def group_unset_admin(self, group_id, member_name):\n        \"\"\"\n        No effect if member_name is not in the group.\n        \"\"\"\n        return ccnet_threaded_rpc.group_unset_admin(group_id, member_name)\n\n    def set_group_name(self, group_id, group_name):\n        return ccnet_threaded_rpc.set_group_name(group_id, group_name)\n    \n    def quit_group(self, group_id, user_name):\n        return ccnet_threaded_rpc.quit_group(group_id, user_name)\n\n    def get_groups(self, user_name, return_ancestors=False):\n        \"\"\"\n        Get all groups the user belongs to.\n        Return: a list of Group objects (ccnet/lib/ccnetobj.vala)\n        \"\"\"\n        return ccnet_threaded_rpc.get_groups(user_name, 1 if return_ancestors else 0)\n\n    def get_all_groups(self, start, limit, source=None):\n        \"\"\"\n        For CE, source is not used and should alwasys be None.\n        \"\"\"\n        return ccnet_threaded_rpc.get_all_groups(start, limit, source)\n    \n    def get_group(self, group_id):\n        return ccnet_threaded_rpc.get_group(group_id)\n\n    def get_group_members(self, group_id, start=-1, limit=-1):\n        \"\"\"\n        Return a list of GroupUser objects (ccnet/lib/ccnetobj.vala)\n        \"\"\"\n        return ccnet_threaded_rpc.get_group_members(group_id, start, limit)\n\n    def get_members_with_prefix (self, group_id, prefix=None):\n        \"\"\"\n        Return a list of GroupUser objects\n        \"\"\"\n        
return ccnet_threaded_rpc.get_members_with_prefix(group_id, prefix)\n\n    def check_group_staff(self, group_id, username, in_structure=False):\n        \"\"\"\n        Return non-zero value if true, 0 if not true\n        \"\"\"\n        return ccnet_threaded_rpc.check_group_staff(group_id, username, 1 if in_structure else 0)\n\n    def remove_group_user(self, username):\n        return ccnet_threaded_rpc.remove_group_user(username)\n    \n    def is_group_user(self, group_id, user, in_structure=True):\n        \"\"\"\n        Return non-zero value if true, 0 if not true\n        If @in_structure is true, return whether user is in descendants groups and @group_id it self \n        \"\"\"\n        return ccnet_threaded_rpc.is_group_user(group_id, user, 1 if in_structure else 0)\n\n    def set_group_creator(self, group_id, user_name):\n        return ccnet_threaded_rpc.set_group_creator(group_id, user_name)\n\n    # organization management\n    def create_org(self, org_name, url_prefix, creator):\n        return ccnet_threaded_rpc.create_org(org_name, url_prefix, creator)\n\n    def remove_org(self, org_id):\n        return ccnet_threaded_rpc.remove_org(org_id)\n    \n    def get_all_orgs(self, start, limit):\n        \"\"\"\n        Return a list of Organization objects (ccnet/lib/ccnetobj.vala)\n        \"\"\"\n        return ccnet_threaded_rpc.get_all_orgs(start, limit)\n\n    def count_orgs(self):\n        return ccnet_threaded_rpc.count_orgs()\n\n    def get_org_by_url_prefix(self, url_prefix):\n        \"\"\"\n        Return an Organizaion object.\n        \"\"\"\n        return ccnet_threaded_rpc.get_org_by_url_prefix(url_prefix)\n\n    def get_org_by_id(self, org_id):\n        return ccnet_threaded_rpc.get_org_by_id(org_id)\n    \n    def add_org_user(self, org_id, email, is_staff):\n        return ccnet_threaded_rpc.add_org_user(org_id, email, is_staff)\n\n    def remove_org_user(self, org_id, email):\n        return 
ccnet_threaded_rpc.remove_org_user(org_id, email)\n    \n    def get_orgs_by_user(self, email):\n        return ccnet_threaded_rpc.get_orgs_by_user(email)\n    \n    def get_org_emailusers(self, url_prefix, start, limit):\n        \"\"\"\n        Return a list of EmailUser objects.\n        \"\"\"\n        return ccnet_threaded_rpc.get_org_emailusers(url_prefix, start, limit)\n\n    def add_org_group(self, org_id, group_id):\n        return ccnet_threaded_rpc.add_org_group(org_id, group_id)\n\n    def remove_org_group(self, org_id, group_id):\n        return ccnet_threaded_rpc.remove_org_group(org_id, group_id)\n\n    def is_org_group(self, group_id):\n        \"\"\"\n        Return non-zero if True, otherwise 0.\n        \"\"\"\n        return ccnet_threaded_rpc.is_org_group(group_id)\n\n    def get_org_id_by_group(self, group_id):\n        return ccnet_threaded_rpc.get_org_id_by_group(group_id)\n    \n    def get_org_groups(self, org_id, start, limit):\n        \"\"\"\n        Return a list of int, each int is group id.\n        \"\"\"\n        return ccnet_threaded_rpc.get_org_groups(org_id, start, limit)\n    \n    def get_org_top_groups(self, org_id):\n        return ccnet_threaded_rpc.get_org_top_groups(org_id)\n\n    def org_user_exists(self, org_id, email):\n        \"\"\"\n        Return non-zero if True, otherwise 0.\n        \"\"\"\n        return ccnet_threaded_rpc.org_user_exists(org_id, email)\n\n    def is_org_staff(self, org_id, user):\n        \"\"\"\n        Return non-zero if True, otherwise 0.\n        \"\"\"\n        return ccnet_threaded_rpc.is_org_staff(org_id, user)\n\n    def set_org_staff(self, org_id, user):\n        return ccnet_threaded_rpc.set_org_staff(org_id, user)\n\n    def unset_org_staff(self, org_id, user):\n        return ccnet_threaded_rpc.unset_org_staff(org_id, user)\n\n    def set_org_name(self, org_id, org_name):\n        return ccnet_threaded_rpc.set_org_name(org_id, org_name)\n\n    def get_primary_id (self, email):\n    
    return ccnet_threaded_rpc.get_primary_id(email)\n\n    def get_groups_members(self, group_ids):\n        \"\"\"\n        @group_ids: json '[id1, id2, id3,...]'\n        \"\"\"\n        return ccnet_threaded_rpc.get_groups_members(group_ids)\n\nccnet_api = CcnetAPI()\n"
  },
  {
    "path": "python/seaserv/service.py",
    "content": "from datetime import datetime\nimport json\nimport logging\nimport os\nimport sys\nimport configparser\nfrom urllib.parse import urlparse\n\nimport seafile\nimport re\nfrom pysearpc import SearpcError\n\n_DEBUG = 'SEAFILE_DEBUG' in os.environ\n\nENVIRONMENT_VARIABLES = ('SEAFILE_DATA_DIR', )\n\n# Used to fix bug in some rpc calls, will be removed in near future.\nMAX_INT = 2147483647\n\ndef _load_path_from_env(key, check=True):\n    v = os.environ.get(key, '')\n    if not v:\n        if check:\n            raise ImportError(\n                \"Seaserv cannot be imported, because environment variable %s is undefined.\" % key\n            )\n        return None\n    if _DEBUG:\n        print(\"Loading %s from %s\" % (key, v))\n    return os.path.normpath(os.path.expanduser(v))\n\ndef _load_data_dir():\n    data_dir = _load_path_from_env('SEAFILE_DATA_DIR', check=False)\n    if data_dir:\n        return data_dir\n\n    return _load_path_from_env('SEAFILE_CONF_DIR')\n\nSEAFILE_DATA_DIR = _load_data_dir()\n# SEAFILE_CENTRAL_CONF_DIR is required\nSEAFILE_CENTRAL_CONF_DIR = _load_path_from_env('SEAFILE_CENTRAL_CONF_DIR', check=True)\nSEAFILE_RPC_PIPE_PATH = _load_path_from_env (\"SEAFILE_RPC_PIPE_PATH\", check=False)\n\nseafile_pipe_path = os.path.join(SEAFILE_RPC_PIPE_PATH if SEAFILE_RPC_PIPE_PATH else SEAFILE_DATA_DIR,\n                                 'seafile.sock')\nseafserv_threaded_rpc = seafile.ServerThreadedRpcClient(seafile_pipe_path)\nccnet_threaded_rpc = seafserv_threaded_rpc\n\n# load ccnet server addr and port from ccnet.conf.\n# 'addr:port' is used when downloading a repo\nconfig = configparser.ConfigParser()\n\nconfig.read(os.path.join(SEAFILE_CENTRAL_CONF_DIR, 'seafile.conf'))\n\ndef get_fileserver_option(key, default):\n    '''\n    \"fileserver\" used to be \"httpserver\"\n    '''\n    for section in ('fileserver', 'httpserver'):\n        if config.has_option(section, key):\n            return config.get(section, key)\n\n    return 
default\n\nUSE_GO_FILESERVER = False\nif config.has_option('fileserver', 'use_go_fileserver'):\n    USE_GO_FILESERVER = config.getboolean('fileserver', 'use_go_fileserver')\n\nif \"ENABLE_GO_FILESERVER\" in os.environ and os.environ[\"ENABLE_GO_FILESERVER\"] == \"true\":\n    USE_GO_FILESERVER = True\n\nMAX_UPLOAD_FILE_SIZE = None # Defaults to no limit\ntry:\n    max_upload_size_mb = int(get_fileserver_option('max_upload_size', 0))\n    if max_upload_size_mb > 0:\n        MAX_UPLOAD_FILE_SIZE = max_upload_size_mb * 1000000\nexcept ValueError:\n    pass\n\nMAX_DOWNLOAD_DIR_SIZE = 100 * 1000000 # Default max size of a downloadable dir\ntry:\n    max_download_dir_size_mb = int(get_fileserver_option('max_download_dir_size', 0))\n    if max_download_dir_size_mb > 0:\n        MAX_DOWNLOAD_DIR_SIZE = max_download_dir_size_mb * 1000000\nexcept ValueError:\n    pass\n\nFILE_SERVER_PORT = get_fileserver_option('port', '8082')\nFILE_SERVER_ROOT = None\n\nCALC_SHARE_USAGE = False\nif config.has_option('quota', 'calc_share_usage'):\n    CALC_SHARE_USAGE = config.getboolean('quota', 'calc_share_usage')\n\n# Get an instance of a logger\nlogger = logging.getLogger(__name__)\n\n#### Basic ccnet API ####\n\ndef get_emailusers(source, start, limit, is_active=None):\n    if is_active is True:\n        status = \"active\"       # list active users\n    elif is_active is False:\n        status = \"inactive\"     # list inactive users\n    else:\n        status = \"\"             # list all users\n\n    return ccnet_threaded_rpc.get_emailusers(source, start, limit, status)\n\ndef count_emailusers():\n    try:\n        ret = ccnet_threaded_rpc.count_emailusers()\n    except SearpcError:\n        ret = -1\n    return 0 if ret < 0 else ret\n\ndef get_emailuser_with_import(email):\n    return ccnet_threaded_rpc.get_emailuser_with_import(email)\n\n# group\ndef get_group(group_id):\n    group_id_int = int(group_id)\n    try:\n        group = ccnet_threaded_rpc.get_group(group_id_int)\n    
except SearpcError:\n        group = None\n    return group\n\ndef get_personal_groups(start, limit):\n    try:\n        groups_all = ccnet_threaded_rpc.get_all_groups(start, limit)\n    except SearpcError:\n        return []\n    return [ x for x in groups_all if not is_org_group(x.id) ]\n\ndef get_personal_groups_by_user(email):\n    try:\n        groups_all = ccnet_threaded_rpc.get_groups(email)\n    except SearpcError:\n        return []\n\n    return [ x for x in groups_all if not is_org_group(x.id) ]\n    \n# group user\ndef is_group_user(group_id, user):\n    try:\n        ret = ccnet_threaded_rpc.is_group_user(group_id, user)\n    except SearpcError:\n        ret = 0\n    return ret\n\ndef check_group_staff(group_id, username):\n    \"\"\"Check where user is group staff\"\"\"\n    group_id = int(group_id)\n    try:\n        ret = ccnet_threaded_rpc.check_group_staff(group_id, username)\n    except SearpcError as e:\n        logger.error(e)\n        ret = 0\n\n    return True if ret == 1 else False\n\ndef remove_group_user(user):\n    \"\"\"\n    Remove group user relationship.\n    \"\"\"\n    return ccnet_threaded_rpc.remove_group_user(user)\n\ndef get_group_members(group_id, start=-1, limit=-1):\n    group_id_int = int(group_id)\n    try:\n        members = ccnet_threaded_rpc.get_group_members(group_id_int, start, limit)\n    except SearpcError:\n        members = []\n    return members\n\n# org group\ndef is_org_group(group_id):\n    try:\n        ret = ccnet_threaded_rpc.is_org_group(group_id)\n    except SearpcError:\n        ret = -1\n    return True if ret == 1 else False\n\ndef get_org_id_by_group(group_id):\n    try:\n        org_id = ccnet_threaded_rpc.get_org_id_by_group(group_id)\n    except SearpcError:\n        org_id = -1\n    return org_id\n\ndef get_org_groups(org_id, start, limit):\n    try:\n        groups = ccnet_threaded_rpc.get_org_groups(org_id, start, limit)\n    except SearpcError:\n        groups = []\n    return groups\n\ndef 
get_org_groups_by_user(org_id, user):\n    \"\"\"\n    Get user's groups in org.\n    \"\"\"\n    try:\n        groups_all = ccnet_threaded_rpc.get_groups(user)\n    except SearpcError:\n        return []\n\n    return [ x for x in groups_all if org_id == get_org_id_by_group(x.id) ]\n    \n# org\ndef create_org(org_name, url_prefix, username):\n    ccnet_threaded_rpc.create_org(org_name, url_prefix, username)\n\ndef get_org_by_url_prefix(url_prefix):\n    try:\n        org = ccnet_threaded_rpc.get_org_by_url_prefix(url_prefix)\n    except SearpcError:\n        org = None\n\n    return org\n\ndef get_org_by_id(org_id):\n    try:\n        org = ccnet_threaded_rpc.get_org_by_id(org_id)\n    except SearpcError:\n        org = None\n\n    return org\n\n# org user\ndef add_org_user(org_id, email, is_staff):\n    try:\n        ccnet_threaded_rpc.add_org_user(org_id, email, is_staff)\n    except SearpcError:\n        pass\n\ndef remove_org_user(org_id, email):\n    try:\n        ccnet_threaded_rpc.remove_org_user(org_id, email)\n    except SearpcError:\n        pass\n\ndef org_user_exists(org_id, user):\n    try:\n        ret = ccnet_threaded_rpc.org_user_exists(org_id, user)\n    except SearpcError:\n        ret = -1\n    return True if ret == 1 else False\n\ndef get_org_users_by_url_prefix(url_prefix, start, limit):\n    \"\"\"\n    List org users.\n    \"\"\"\n    try:\n        users = ccnet_threaded_rpc.get_org_emailusers(url_prefix, start, limit)\n    except:\n        users = []\n    return users\n\ndef get_orgs_by_user(user):\n    try:\n        orgs = ccnet_threaded_rpc.get_orgs_by_user(user)\n    except SearpcError:\n        orgs = []\n\n    return orgs\n\ndef is_org_staff(org_id, user):\n    \"\"\"\n    Check whether user is staff of a org.\n    \"\"\"\n    try:\n        ret = ccnet_threaded_rpc.is_org_staff(org_id, user)\n    except SearpcError:\n        ret = -1\n    return True if ret == 1 else False\n\ndef get_user_current_org(user, url_prefix):\n    orgs = 
get_orgs_by_user(user)\n    for org in orgs:\n        if org.url_prefix == url_prefix:\n            return org\n    return None\n\ndef send_command(command):\n    client = pool.get_client()\n    client.send_cmd(command)\n    ret = client.response[2]\n    pool.return_client(client)\n    return ret\n\ndef send_message(msg_type, content):\n    client = pool.get_client()\n    client.send_message(msg_type, content)\n    pool.return_client(client)\n\ndef get_binding_peerids(email):\n    \"\"\"Get peer ids of a given email\"\"\"\n    try:\n        peer_ids = ccnet_threaded_rpc.get_binding_peerids(email)\n    except SearpcError:\n        return []\n\n    if not peer_ids:\n        return []\n    \n    peerid_list = []\n    for peer_id in peer_ids.split(\"\\n\"):\n        if peer_id == '':\n            continue\n        peerid_list.append(peer_id)\n    return peerid_list\n\n######## seafserv API ####\n\n# repo\ndef get_repos():\n    \"\"\"\n    Return repository list.\n\n    \"\"\"\n    return seafserv_threaded_rpc.get_repo_list(\"\", 100)\n\ndef get_repo(repo_id):\n    return seafserv_threaded_rpc.get_repo(repo_id)\n\ndef edit_repo(repo_id, name, desc, user):\n    try:\n        ret = seafserv_threaded_rpc.edit_repo(repo_id, name, desc, user)\n    except SearpcError as e:\n        ret = -1\n    return True if ret == 0 else False\n\ndef create_repo(name, desc, user, passwd):\n    \"\"\"\n    Return repo id if successfully created a repo, otherwise None.\n    \"\"\"\n    try:\n        ret = seafserv_threaded_rpc.create_repo(name, desc, user, passwd)\n    except SearpcError as e:\n        logger.error(e)\n        ret = None\n    return ret\n    \ndef remove_repo(repo_id):\n    \"\"\"\n    Return true if successfully removed a repo, otherwise false.\n    \"\"\"\n    try:\n        ret = seafserv_threaded_rpc.remove_repo(repo_id)\n    except SearpcError as e:\n        logger.error(e)\n        ret = -1\n    return True if ret == 0 else False\n\ndef 
list_personal_repos_by_owner(owner):\n    \"\"\"\n    List users owned repos in personal context.\n    \"\"\"\n    try:\n        repos = seafserv_threaded_rpc.list_owned_repos(owner)\n    except SearpcError:\n        repos = []\n    return repos\n\ndef get_repo_token_nonnull(repo_id, username):\n    return seafserv_threaded_rpc.get_repo_token_nonnull (repo_id, username)\n\ndef get_repo_owner(repo_id):\n    \"\"\"\n    Get owner of a repo.\n    \"\"\"\n    try:\n        ret = seafserv_threaded_rpc.get_repo_owner(repo_id)\n    except SearpcError:\n        ret = ''\n    return ret\n    \ndef is_repo_owner(user, repo_id):\n    \"\"\"\n    Check whether user is repo owner.\n    \"\"\"\n    try:\n        ret = seafserv_threaded_rpc.is_repo_owner(user, repo_id)\n    except SearpcError:\n        ret = 0\n    return ret\n\ndef server_repo_size(repo_id):\n    try:\n        size = seafserv_threaded_rpc.server_repo_size(repo_id)\n    except SearpcError:\n        size = 0\n    return size\n\n# org repo\ndef create_org_repo(repo_name, repo_desc, user, passwd, org_id):\n    \"\"\"\n    Create org repo, return valid repo id if success.\n    \"\"\"\n    try:\n        repo_id = seafserv_threaded_rpc.create_org_repo(repo_name, repo_desc,\n                                                        user, passwd, org_id)\n    except SearpcError:\n        repo_id = None\n        \n    return repo_id\n\ndef is_org_repo(repo_id):\n    org_id = get_org_id_by_repo_id(repo_id)\n    return True if org_id > 0 else False\n\ndef list_org_repos_by_owner(org_id, user):\n    try:\n        repos = seafserv_threaded_rpc.list_org_repos_by_owner(org_id, user)\n    except SearpcError:\n        repos = []\n    return repos\n\ndef get_org_repos(org_id, start, limit):\n    \"\"\"\n    List repos created in org.\n    \"\"\"\n    try:\n        repos = seafserv_threaded_rpc.get_org_repo_list(org_id, start, limit)\n    except SearpcError:\n        repos = []\n\n    if repos:\n        for r in repos:\n            
r.owner = get_org_repo_owner(r.id)\n            \n    return repos\n\ndef get_org_id_by_repo_id(repo_id):\n    \"\"\"\n    Get org id according repo id.\n    \"\"\"\n    try:\n        org_id = seafserv_threaded_rpc.get_org_id_by_repo_id(repo_id)\n    except SearpcError:\n        org_id = -1\n    return org_id\n\ndef is_org_repo_owner(org_id, repo_id, user):\n    \"\"\"\n    Check whether user is org repo owner.\n    NOTE:\n    \t`org_id` may used in future.\n    \"\"\"\n    owner = get_org_repo_owner(repo_id)\n    if not owner:\n        return False \n    return True if owner == user else False\n\ndef get_org_repo_owner(repo_id):\n    \"\"\"\n    Get owner of org repo.\n    \"\"\"\n    try:\n        owner = seafserv_threaded_rpc.get_org_repo_owner(repo_id)\n    except SearpcError:\n        owner = None\n    return owner\n\n# commit\ndef get_commit(repo_id, repo_version, cmt_id):\n    \"\"\" Get a commit. \"\"\"\n    try:\n        ret = seafserv_threaded_rpc.get_commit(repo_id, repo_version, cmt_id)\n    except SearpcError:\n        ret = None\n    return ret\n    \ndef get_commits(repo_id, offset, limit):\n    \"\"\"Get commit lists.\"\"\"\n    try:\n        ret = seafserv_threaded_rpc.get_commit_list(repo_id, offset, limit)\n    except SearpcError:\n        ret = None\n    return ret\n\n# branch\ndef get_branches(repo_id):\n    \"\"\"Get branches of a given repo\"\"\"\n    return seafserv_threaded_rpc.branch_gets(repo_id)\n\n# group repo\ndef get_group_repos_by_owner(user):\n    \"\"\"\n    List user's repos that are sharing to groups\n    \"\"\"\n    try:\n        ret = seafserv_threaded_rpc.get_group_repos_by_owner(user)\n    except SearpcError:\n        ret = []\n    return ret\n\ndef get_shared_groups_by_repo(repo_id):\n    try:\n        group_ids = seafserv_threaded_rpc.get_shared_groups_by_repo(repo_id)\n    except SearpcError:\n        group_ids = ''\n    if not group_ids:\n        return []\n\n    groups = []\n    for group_id in group_ids.split('\\n'):\n  
      if not group_id:\n            continue\n        group = get_group(group_id)\n        if group:\n            groups.append(group)\n    return groups\n\ndef conv_repoids_to_list(repo_ids):\n    \"\"\"\n    Convert repo ids seperated by \"\\n\" to list.\n    \"\"\"\n    if not repo_ids:\n        return []\n\n    repoid_list = []\n    for repo_id in repo_ids.split(\"\\n\"):\n        if repo_id == '':\n            continue\n        repoid_list.append(repo_id)\n    return repoid_list\n    \ndef get_group_repoids(group_id):\n    \"\"\"Get repo ids of a given group id.\"\"\"\n    try:\n        repo_ids = seafserv_threaded_rpc.get_group_repoids(group_id)\n    except SearpcError:\n        return []\n\n    return conv_repoids_to_list(repo_ids)\n\ndef get_group_repos(group_id, user):\n    \"\"\"Get repos of a given group id.\"\"\"\n    repoid_list = get_group_repoids(group_id)\n\n    repos = []\n    for repo_id in repoid_list:\n        if not repo_id:\n            continue\n        repo = get_repo(repo_id)\n        if not repo:\n            continue\n\n        repo.owner = seafserv_threaded_rpc.get_group_repo_owner(repo_id)\n        repo.share_from_me = True if user == repo.owner else False\n\n        last_commit = get_commits(repo.id, 0, 1)[0]\n        repo.latest_modify = last_commit.ctime if last_commit else None\n\n        repos.append(repo)\n    repos.sort(lambda x, y: cmp(y.latest_modify, x.latest_modify))\n    \n    return repos\n\n# org group repo\ndef del_org_group_repo(repo_id, org_id, group_id):\n    seafserv_threaded_rpc.del_org_group_repo(repo_id, org_id, group_id)\n\ndef get_org_group_repoids(org_id, group_id):\n    try:\n        repo_ids = seafserv_threaded_rpc.get_org_group_repoids(org_id, group_id)\n    except SearpcError:\n        repo_ids = ''\n\n    return conv_repoids_to_list(repo_ids)\n\ndef get_org_group_repos(org_id, group_id, user):\n    \"\"\"Get org repos of a given group id.\"\"\"\n    repoid_list = get_org_group_repoids(org_id, group_id)\n    
if not repoid_list:\n        return []\n    \n    repos = []\n    for repo_id in repoid_list:\n        if not repo_id:\n            continue\n        repo = get_repo(repo_id)\n        if not repo:\n            continue\n\n        repo.owner = seafserv_threaded_rpc.get_org_group_repo_owner(org_id,\n                                                                    group_id,\n                                                                    repo_id)\n        repo.sharecd_from_me = True if user == repo.owner else False\n\n        last_commit = get_commits(repo.id, 0, 1)[0]\n        repo.latest_modify = last_commit.ctime if last_commit else None\n\n        repos.append(repo)\n    repos.sort(lambda x, y: cmp(y.latest_modify, x.latest_modify))\n    \n    return repos\n\ndef get_org_groups_by_repo(org_id, repo_id):\n    try:\n        group_ids = seafserv_threaded_rpc.get_org_groups_by_repo(org_id,\n                                                                 repo_id)\n    except SearpcError:\n        group_ids = ''\n    if not group_ids:\n        return []\n\n    groups = []\n    for group_id in group_ids.split('\\n'):\n        if not group_id:\n            continue\n        group = get_group(group_id)\n        if group:\n            groups.append(group)\n    return groups\n    \n# inner pub repo\ndef list_inner_pub_repos_by_owner(user):\n    \"\"\"\n    List a user's inner pub repos.\n    \"\"\"\n    try:\n        ret = seafserv_threaded_rpc.list_inner_pub_repos_by_owner(user)\n    except SearpcError:\n        ret = []\n    return ret\n\ndef list_inner_pub_repos(username):\n    \"\"\"\n    List inner pub repos, which can be access by everyone.\n    \"\"\"\n    try:\n        shared_repos = seafserv_threaded_rpc.list_inner_pub_repos()\n    except:\n        shared_repos = []\n\n    for repo in shared_repos:\n        repo.user_perm = check_permission(repo.props.repo_id, username)\n\n    shared_repos.sort(lambda x, y: cmp(y.props.last_modified, 
x.props.last_modified))\n    return shared_repos\n\ndef count_inner_pub_repos():\n    try:\n        ret = seafserv_threaded_rpc.count_inner_pub_repos()\n    except SearpcError:\n        ret = -1\n    return 0 if ret < 0 else ret\n\ndef is_inner_pub_repo(repo_id):\n    \"\"\"\n    Check whether a repo is public.\n    Return 0 if repo is not inner public, otherwise non-zero.\n    \"\"\"\n    try:\n        ret = seafserv_threaded_rpc.is_inner_pub_repo(repo_id)\n    except SearpcError:\n        ret = 0\n\n    return ret\n\ndef unset_inner_pub_repo(repo_id):\n    seafserv_threaded_rpc.unset_inner_pub_repo(repo_id)\n        \n# org inner pub repo\ndef list_org_inner_pub_repos(org_id, username, start=None, limit=None):\n    \"\"\"\n    List org inner pub repos, which can be access by all org members.\n    \"\"\"\n    try:\n        shared_repos = seafserv_threaded_rpc.list_org_inner_pub_repos(org_id)\n    except SearpcError:\n        shared_repos = []\n\n    for repo in shared_repos:\n        repo.user_perm = check_permission(repo.props.repo_id, username)\n\n    # sort repos by last modify time\n    shared_repos.sort(lambda x, y: cmp(y.props.last_modified, x.props.last_modified))\n    return shared_repos\n\n# repo permission\ndef check_permission(repo_id, user):\n    \"\"\"\n    Check whether user has permission to access repo.\n    Return values can be 'rw' or 'r' or None.\n    \"\"\"\n    try:\n        ret = seafserv_threaded_rpc.check_permission(repo_id, user)\n    except SearpcError:\n        ret = None\n    return ret\n\ndef is_personal_repo(repo_id):\n    \"\"\"\n    Check whether repo is personal repo.\n    \"\"\"\n    try:\n        owner = seafserv_threaded_rpc.get_repo_owner(repo_id)\n    except SearpcError:\n        owner = ''\n    return True if owner else False\n\n# shared repo\ndef list_share_repos(user, share_type, start, limit):\n    try:\n        ret = seafserv_threaded_rpc.list_share_repos(user, share_type,\n                                                 
    start, limit)\n    except SearpcError:\n        ret = []\n    return ret\n\ndef remove_share(repo_id, from_user, to_user):\n    seafserv_threaded_rpc.remove_share(repo_id, from_user, to_user)\n\ndef unshare_group_repo(repo_id, group_id, from_user):\n    return seafserv_threaded_rpc.group_unshare_repo(repo_id, int(group_id),\n                                                    from_user)\n        \ndef list_personal_shared_repos(user, user_type, start, limit):\n    \"\"\"\n    List personal repos that user share with others.\n    If `user_type` is 'from_email', list repos user shares to others;\n    If `user_type` is 'to_email', list repos others share to user.\n    \"\"\"\n    share_repos = list_share_repos(user, user_type, start, limit)\n    for repo in share_repos:\n        repo.user_perm = check_permission(repo.props.repo_id, user)\n\n    share_repos.sort(lambda x, y: cmp(y.last_modified, x.last_modified))\n    return share_repos\n\ndef list_org_shared_repos(org_id, user, user_type, start, limit):\n    \"\"\"\n    List org repos that user share with others.\n    If `user_type` is 'from_email', list repos user shares to others;\n    If `user_type` is 'to_email', list repos others share to user.\n    \"\"\"\n    try:\n        share_repos = seafserv_threaded_rpc.list_org_share_repos(org_id,\n                                                                 user, user_type,\n                                                                 start, limit)\n    except SearpcError:\n        share_repos = []\n\n    for repo in share_repos:\n        repo.user_perm = check_permission(repo.props.repo_id, user)\n\n    share_repos.sort(lambda x, y: cmp(y.last_modified, x.last_modified))\n    return share_repos\n\n# dir\ndef list_dir_by_path(repo_id, commit_id, path):\n    try:\n        ret = seafserv_threaded_rpc.list_dir_by_path(repo_id, commit_id, path)\n    except SearpcError:\n        ret = None\n    return ret\n\n# file\ndef post_empty_file(repo_id, parent_dir, 
file_name, user):\n    \"\"\"\n    Return true if successfully make a new file, otherwise false.\n    \"\"\"\n    try:\n        ret = seafserv_threaded_rpc.post_empty_file(repo_id, parent_dir,\n                                              file_name, user)\n    except SearpcError as e:\n        logger.error(e)\n        ret = -1\n    return True if ret == 0 else False\n\ndef del_file(repo_id, parent_dir, file_name, user):\n    \"\"\"\n    Return true if successfully delete a file, otherwise false.\n    \"\"\"\n    try:\n        ret = seafserv_threaded_rpc.del_file(repo_id, parent_dir,\n                                             file_name, user)\n    except SearpcError as e:\n        logger.error(e)\n        ret = -1\n    return True if ret == 0 else False\n\n# misc functions\ndef is_valid_filename(file_or_dir):\n    \"\"\"\n    Check whether file name or directory name is valid.\n    \"\"\"\n    try:\n        ret = seafserv_threaded_rpc.is_valid_filename('', file_or_dir)\n    except SearpcError:\n        ret = 0\n\n    return ret\n\ndef get_file_size(store_id, version, file_id):\n    try:\n        fs = seafserv_threaded_rpc.get_file_size(store_id, version, file_id)\n    except SearpcError as e:\n        fs = 0\n    return fs\n\ndef get_file_id_by_path(repo_id, path):\n    try:\n        ret = seafserv_threaded_rpc.get_file_id_by_path(repo_id, path)\n    except SearpcError as e:\n        ret = ''\n    return ret\n\ndef get_related_users_by_repo(repo_id):\n    \"\"\"Give a repo id, returns a list of users of:\n    - the repo owner\n    - members of groups to which the repo is shared\n    - users to which the repo is shared\n    \"\"\"\n    owner = seafserv_threaded_rpc.get_repo_owner(repo_id)\n    if not owner:\n        # Can't happen\n        return []\n\n    users = [owner]\n\n    groups = get_shared_groups_by_repo(repo_id)\n\n    for group in groups:\n        members = get_group_members(group.id)\n        for member in members:\n            if member.user_name not 
in users:\n                users.append(member.user_name)\n\n    share_repos = list_share_repos(owner, 'from_email', -1, -1)\n    for repo in share_repos:\n        if repo.repo_id == repo_id:\n            if repo.user not in users:\n                users.append(repo.user)\n\n    return users\n\ndef get_related_users_by_org_repo(org_id, repo_id):\n    \"\"\"Org version of get_related_users_by_repo\n    \"\"\"\n    owner = get_org_repo_owner(repo_id)\n\n    if not owner:\n        # Can't happen\n        return []\n\n    users = [owner]\n\n    groups = get_org_groups_by_repo(org_id, repo_id)\n\n    for group in groups:\n        members = get_group_members(group.id)\n        for member in members:\n            if member.user_name not in users:\n                users.append(member.user_name)\n\n    share_repos = seafserv_threaded_rpc.list_org_share_repos(org_id, \\\n                                        owner, 'from_email', -1, -1)\n\n    for repo in share_repos:\n        if repo.repo_id == repo_id:\n            if repo.user not in users:\n                users.append(repo.user)\n\n    return users\n\n# quota\ndef check_quota(repo_id, delta=0):\n    try:\n        ret = seafserv_threaded_rpc.check_quota(repo_id, delta)\n    except SearpcError as e:\n        logger.error(e)\n        ret = -1\n    return ret\n\ndef get_user_quota(user):\n    try:\n        ret = seafserv_threaded_rpc.get_user_quota(user)\n    except SearpcError as e:\n        logger.error(e)\n        ret = 0\n    return ret\n\ndef get_user_quota_usage(user):\n    try:\n        ret = seafserv_threaded_rpc.get_user_quota_usage(user)\n    except SearpcError as e:\n        logger.error(e)\n        ret = 0\n    return ret\n\ndef get_user_share_usage(user):\n    try:\n        ret = seafserv_threaded_rpc.get_user_share_usage(user)\n    except SearpcError as e:\n        logger.error(e)\n        ret = 0\n    return ret\n    \n# access token\ndef web_get_access_token(repo_id, obj_id, op, username, use_onetime=1):\n 
   try:\n        ret = seafserv_rpc.web_get_access_token(repo_id, obj_id, op, username, use_onetime)\n    except SearpcError as e:\n        ret = ''\n    return ret\n    \n# password management\ndef unset_repo_passwd(repo_id, user):\n    \"\"\"\n    Remove user password of a encrypt repo.\n    Arguments:\n    - `repo_id`: encrypt repo id\n    - `user`: username\n    \"\"\"\n    try:\n        ret = seafserv_threaded_rpc.unset_passwd(repo_id, user)\n    except SearpcError as e:\n        ret = -1\n    return ret\n\ndef is_passwd_set(repo_id, user):\n    try:\n        ret = seafserv_rpc.is_passwd_set(repo_id, user)\n    except SearpcError as e:\n        ret = -1\n    return True if ret == 1 else False\n\n# repo history limit\ndef get_repo_history_limit(repo_id):\n    try:\n        ret = seafserv_threaded_rpc.get_repo_history_limit(repo_id)\n    except SearpcError as e:\n        ret = -1\n    return ret\n\ndef set_repo_history_limit(repo_id, days):\n    try:\n        ret = seafserv_threaded_rpc.set_repo_history_limit(repo_id, days)\n    except SearpcError as e:\n        ret = -1\n    return ret\n"
  },
  {
    "path": "run_tests.sh",
    "content": "#!/bin/bash\n\nset -e\n\nSCRIPT=${BASH_SOURCE[0]}\nPROJECT_DIR=$(dirname \"${SCRIPT}\")\n\ncd $PROJECT_DIR\n\nexport PYTHONPATH=$PROJECT_DIR:$PYTHONPATH\n\nci/run.py --test-only\n"
  },
  {
    "path": "scripts/Makefile.am",
    "content": "bin_SCRIPTS = parse_seahub_db.py\n\nEXTRA_DIST = parse_seahub_db.py\n"
  },
  {
    "path": "scripts/parse_seahub_db.py",
    "content": "import json\nimport seahub_settings\n\ndb_infos = seahub_settings.DATABASES['default']\n\nprint(json.dumps(db_infos))\n"
  },
  {
    "path": "scripts/sql/mysql/ccnet.sql",
    "content": "CREATE TABLE IF NOT EXISTS Binding (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  email VARCHAR(255),\n  peer_id CHAR(41),\n  UNIQUE INDEX (peer_id),\n  INDEX (email(20))\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS EmailUser (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  email VARCHAR(255),\n  passwd VARCHAR(256),\n  is_staff BOOL NOT NULL,\n  is_active BOOL NOT NULL,\n  is_department_owner BOOL NOT NULL DEFAULT 0,\n  ctime BIGINT,\n  reference_id VARCHAR(255),\n  UNIQUE INDEX (email),\n  UNIQUE INDEX (reference_id),\n  INDEX (is_active),\n  INDEX (is_department_owner)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS `Group` (\n  `group_id` BIGINT PRIMARY KEY AUTO_INCREMENT,\n  `group_name` VARCHAR(255),\n  `creator_name` VARCHAR(255),\n  `timestamp` BIGINT,\n  `type` VARCHAR(32),\n  `parent_group_id` INTEGER\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS GroupDNPair (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  group_id INTEGER,\n  dn VARCHAR(255)\n)ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS GroupStructure (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  group_id INTEGER,\n  path VARCHAR(1024),\n  UNIQUE INDEX(group_id)\n)ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS `GroupUser` (\n  `id` BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  `group_id` BIGINT,\n  `user_name` VARCHAR(255),\n  `is_staff` tinyint,\n  UNIQUE INDEX (`group_id`, `user_name`),\n  INDEX (`user_name`)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS LDAPConfig (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  cfg_group VARCHAR(255) NOT NULL,\n  cfg_key VARCHAR(255) NOT NULL,\n  value VARCHAR(255),\n  property INTEGER\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS LDAPUsers (\n  id BIGINT PRIMARY KEY AUTO_INCREMENT,\n  email VARCHAR(255) NOT NULL,\n  password varchar(255) NOT NULL,\n  is_staff BOOL NOT NULL,\n  is_active BOOL NOT NULL,\n  extra_attrs TEXT,\n  reference_id VARCHAR(255),\n  UNIQUE INDEX(email),\n  UNIQUE INDEX 
(reference_id)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS OrgGroup (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  org_id INTEGER,\n  group_id INTEGER,\n  INDEX (group_id),\n  UNIQUE INDEX(org_id, group_id)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS OrgUser (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  org_id INTEGER,\n  email VARCHAR(255),\n  is_staff BOOL NOT NULL,\n  INDEX (email),\n  UNIQUE INDEX(org_id, email)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS Organization (\n  org_id BIGINT PRIMARY KEY AUTO_INCREMENT,\n  org_name VARCHAR(255),\n  url_prefix VARCHAR(255),\n  creator VARCHAR(255),\n  ctime BIGINT,\n  UNIQUE INDEX (url_prefix)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS UserRole (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  email VARCHAR(255),\n  role VARCHAR(255),\n  is_manual_set INTEGER DEFAULT 0,\n  UNIQUE INDEX (email)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS OrgFileExtWhiteList (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  org_id INTEGER,\n  white_list TEXT,\n  UNIQUE INDEX (org_id)\n) ENGINE=INNODB;\n"
  },
  {
    "path": "scripts/sql/mysql/seafile.sql",
    "content": "CREATE TABLE IF NOT EXISTS Branch (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  name VARCHAR(10),\n  repo_id CHAR(41),\n  commit_id CHAR(41),\n  UNIQUE INDEX(repo_id, name)\n) ENGINE = INNODB;\n\nCREATE TABLE IF NOT EXISTS FileLockTimestamp (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  repo_id CHAR(40),\n  update_time BIGINT NOT NULL,\n  UNIQUE INDEX(repo_id)\n);\n\nCREATE TABLE IF NOT EXISTS FileLocks (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  repo_id CHAR(40) NOT NULL,\n  path TEXT NOT NULL,\n  user_name VARCHAR(255) NOT NULL,\n  lock_time BIGINT,\n  expire BIGINT,\n  KEY(repo_id)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS FolderGroupPerm (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  repo_id CHAR(36) NOT NULL,\n  path TEXT NOT NULL,\n  permission CHAR(15),\n  group_id INTEGER NOT NULL,\n  INDEX(repo_id)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS FolderPermTimestamp (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  repo_id CHAR(36),\n  timestamp BIGINT,\n  UNIQUE INDEX(repo_id)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS FolderUserPerm (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  repo_id CHAR(36) NOT NULL,\n  path TEXT NOT NULL,\n  permission CHAR(15),\n  user VARCHAR(255) NOT NULL,\n  INDEX(repo_id)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS GCID (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  repo_id CHAR(36),\n  gc_id CHAR(36),\n  UNIQUE INDEX(repo_id)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS GarbageRepos (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  repo_id CHAR(36),\n  UNIQUE INDEX(repo_id)\n);\n\nCREATE TABLE IF NOT EXISTS InnerPubRepo (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  repo_id CHAR(37),\n  permission CHAR(15),\n  UNIQUE INDEX (repo_id)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS LastGCID (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  repo_id CHAR(36),\n  client_id VARCHAR(128),\n  gc_id CHAR(36),\n  UNIQUE 
INDEX(repo_id, client_id)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS OrgGroupRepo (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  org_id INTEGER,\n  repo_id CHAR(37),\n  group_id INTEGER,\n  owner VARCHAR(255),\n  permission CHAR(15),\n  UNIQUE INDEX(org_id, group_id, repo_id),\n  INDEX (repo_id), INDEX (owner)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS OrgInnerPubRepo (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  org_id INTEGER,\n  repo_id CHAR(37),\n  UNIQUE INDEX(org_id, repo_id),\n  permission CHAR(15)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS OrgQuota (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  org_id INTEGER,\n  quota BIGINT,\n  UNIQUE INDEX(org_id)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS OrgRepo (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  org_id INTEGER,\n  repo_id CHAR(37),\n  user VARCHAR(255),\n  UNIQUE INDEX(org_id, repo_id),\n  UNIQUE INDEX (repo_id),\n  INDEX (org_id, user),\n  INDEX(user)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS OrgSharedRepo (\n  id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  org_id INT,\n  repo_id CHAR(37) ,\n  from_email VARCHAR(255),\n  to_email VARCHAR(255),\n  permission CHAR(15),\n  INDEX(repo_id),\n  INDEX (org_id, repo_id),\n  INDEX(from_email), INDEX(to_email)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS OrgUserQuota (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  org_id INTEGER,\n  user VARCHAR(255),\n  quota BIGINT,\n  UNIQUE INDEX(org_id, user)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS Repo (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  repo_id CHAR(37),\n  UNIQUE INDEX (repo_id)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS RepoFileCount (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  repo_id CHAR(36),\n  file_count BIGINT UNSIGNED,\n  UNIQUE INDEX(repo_id)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS RepoGroup (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  repo_id CHAR(37),\n  group_id INTEGER,\n  
user_name VARCHAR(255),\n  permission CHAR(15),\n  UNIQUE INDEX(group_id, repo_id),\n  INDEX (repo_id), INDEX (user_name)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS RepoHead (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  repo_id CHAR(37),\n  branch_name VARCHAR(10),\n  UNIQUE INDEX(repo_id)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS RepoHistoryLimit (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  repo_id CHAR(37),\n  days INTEGER,\n  UNIQUE INDEX(repo_id)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS RepoInfo (id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  repo_id CHAR(36),\n  name VARCHAR(255) NOT NULL,\n  update_time BIGINT,\n  version INTEGER,\n  is_encrypted INTEGER,\n  last_modifier VARCHAR(255),\n  status INTEGER DEFAULT 0,\n  type VARCHAR(10),\n  UNIQUE INDEX(repo_id),\n  INDEX (type)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS RepoOwner (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  repo_id CHAR(37),\n  owner_id VARCHAR(255),\n  UNIQUE INDEX (repo_id),\n  INDEX (owner_id)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS RepoSize (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  repo_id CHAR(37),\n  size BIGINT UNSIGNED,\n  head_id CHAR(41),\n  UNIQUE INDEX (repo_id)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS RepoStorageId (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  repo_id CHAR(40) NOT NULL,\n  storage_id VARCHAR(255) NOT NULL,\n  UNIQUE INDEX(repo_id)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS RepoSyncError (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  token CHAR(41),\n  error_time BIGINT UNSIGNED,\n  error_con VARCHAR(1024),\n  UNIQUE INDEX(token)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS RepoTokenPeerInfo (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  token CHAR(41),\n  peer_id CHAR(41),\n  peer_ip VARCHAR(50),\n  peer_name VARCHAR(255),\n  sync_time BIGINT,\n  client_ver VARCHAR(20),\n  UNIQUE INDEX(token),\n  INDEX(peer_id)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT 
EXISTS RepoTrash (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  repo_id CHAR(36),\n  repo_name VARCHAR(255),\n  head_id CHAR(40),\n  owner_id VARCHAR(255),\n  size BIGINT(20),\n  org_id INTEGER,\n  del_time BIGINT,\n  UNIQUE INDEX(repo_id),\n  INDEX(owner_id),\n  INDEX(org_id)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS RepoUserToken (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  repo_id CHAR(37),\n  email VARCHAR(255),\n  token CHAR(41),\n  UNIQUE INDEX(repo_id, token),\n  INDEX(token),\n  INDEX (email)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS RepoValidSince (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  repo_id CHAR(37),\n  timestamp BIGINT,\n  UNIQUE INDEX(repo_id)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS RoleQuota (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  role VARCHAR(255),\n  quota BIGINT,\n  UNIQUE INDEX(role)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS SeafileConf (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  cfg_group VARCHAR(255) NOT NULL,\n  cfg_key VARCHAR(255) NOT NULL,\n  value VARCHAR(255),\n  property INTEGER\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS SharedRepo (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  repo_id CHAR(37) ,\n  from_email VARCHAR(255),\n  to_email VARCHAR(255),\n  permission CHAR(15),\n  INDEX (repo_id),\n  INDEX(from_email),\n  INDEX(to_email)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS SystemInfo (\n  id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  info_key VARCHAR(256),\n  info_value VARCHAR(1024)\n);\n\nCREATE TABLE IF NOT EXISTS UserQuota (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  user VARCHAR(255),\n  quota BIGINT,\n  UNIQUE INDEX(user)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS UserShareQuota (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  user VARCHAR(255),\n  quota BIGINT,\n  UNIQUE INDEX(user)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS VirtualRepo (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  
repo_id CHAR(36),\n  origin_repo CHAR(36),\n  path TEXT,\n  base_commit CHAR(40),\n  UNIQUE INDEX(repo_id),\n  INDEX(origin_repo)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS WebAP (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  repo_id CHAR(37),\n  access_property CHAR(10),\n  UNIQUE INDEX(repo_id)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS WebUploadTempFiles (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  repo_id CHAR(40) NOT NULL,\n  file_path TEXT NOT NULL,\n  tmp_file_path TEXT NOT NULL,\n  INDEX(repo_id)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS RoleUploadRateLimit (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  role VARCHAR(255),\n  upload_limit BIGINT,\n  UNIQUE INDEX(role)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS RoleDownloadRateLimit (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  role VARCHAR(255),\n  download_limit BIGINT,\n  UNIQUE INDEX(role)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS UserUploadRateLimit (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  user VARCHAR(255),\n  upload_limit BIGINT,\n  UNIQUE INDEX(user)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS UserDownloadRateLimit (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  user VARCHAR(255),\n  download_limit BIGINT,\n  UNIQUE INDEX(user)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS OrgUserDefaultQuota (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, \n  org_id INTEGER,\n  quota BIGINT, UNIQUE INDEX(org_id)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS OrgDownloadRateLimit (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  org_id INTEGER,\n  download_limit BIGINT,\n  UNIQUE INDEX(org_id)\n) ENGINE=INNODB;\n\nCREATE TABLE IF NOT EXISTS OrgUploadRateLimit (\n  id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\n  org_id INTEGER,\n  upload_limit BIGINT,\n  UNIQUE INDEX(org_id)\n) ENGINE=INNODB;\n"
  },
  {
    "path": "scripts/sql/sqlite/config.sql",
    "content": "CREATE TABLE IF NOT EXISTS Config (key TEXT PRIMARY KEY, value TEXT);\n"
  },
  {
    "path": "scripts/sql/sqlite/groupmgr.sql",
    "content": "CREATE TABLE IF NOT EXISTS `Group` (`group_id` INTEGER PRIMARY KEY AUTOINCREMENT, `group_name` VARCHAR(255), `creator_name` VARCHAR(255), `timestamp` BIGINT,  `type` VARCHAR(32), `parent_group_id` INTEGER);\nCREATE TABLE IF NOT EXISTS `GroupUser` (`group_id` INTEGER, `user_name` VARCHAR(255), `is_staff` tinyint);\nCREATE UNIQUE INDEX IF NOT EXISTS groupid_username_indx on `GroupUser` (`group_id`, `user_name`);\nCREATE INDEX IF NOT EXISTS username_indx on `GroupUser` (`user_name`);\nCREATE TABLE IF NOT EXISTS GroupDNPair (group_id INTEGER,  dn VARCHAR(255));\nCREATE TABLE IF NOT EXISTS GroupStructure (group_id INTEGER PRIMARY KEY, path VARCHAR(1024));\n\n"
  },
  {
    "path": "scripts/sql/sqlite/org.sql",
    "content": "CREATE TABLE IF NOT EXISTS OrgGroup (org_id INTEGER, group_id INTEGER);\nCREATE INDEX IF NOT EXISTS groupid_indx on OrgGroup (group_id);\n\n\nCREATE TABLE IF NOT EXISTS Organization (org_id INTEGER PRIMARY KEY AUTOINCREMENT, org_name VARCHAR(255), url_prefix VARCHAR(255), creator VARCHAR(255), ctime BIGINT);\nCREATE UNIQUE INDEX IF NOT EXISTS url_prefix_indx on Organization (url_prefix);\n\nCREATE TABLE IF NOT EXISTS OrgUser (org_id INTEGER, email TEXT, is_staff bool NOT NULL);\nCREATE INDEX IF NOT EXISTS email_indx on OrgUser (email);\nCREATE UNIQUE INDEX IF NOT EXISTS orgid_email_indx on OrgUser (org_id, email);\n"
  },
  {
    "path": "scripts/sql/sqlite/seafile.sql",
    "content": "CREATE TABLE IF NOT EXISTS Branch (name VARCHAR(10), repo_id CHAR(40), commit_id CHAR(40), PRIMARY KEY (repo_id, name));\nCREATE TABLE IF NOT EXISTS Repo (repo_id CHAR(37) PRIMARY KEY);\nCREATE TABLE IF NOT EXISTS RepoOwner (repo_id CHAR(37) PRIMARY KEY, owner_id TEXT);\nCREATE INDEX IF NOT EXISTS OwnerIndex ON RepoOwner (owner_id);\n\nCREATE TABLE IF NOT EXISTS RepoGroup (repo_id CHAR(37), group_id INTEGER, user_name TEXT, permission CHAR(15));\nCREATE UNIQUE INDEX IF NOT EXISTS groupid_repoid_indx on RepoGroup (group_id, repo_id);\nCREATE INDEX IF NOT EXISTS repogroup_repoid_index on RepoGroup (repo_id);\nCREATE INDEX IF NOT EXISTS repogroup_username_indx on RepoGroup (user_name);\nCREATE TABLE IF NOT EXISTS InnerPubRepo (repo_id CHAR(37) PRIMARY KEY, permission CHAR(15));\n\nCREATE TABLE IF NOT EXISTS OrgRepo (org_id INTEGER, repo_id CHAR(37), user VARCHAR(255));\nCREATE UNIQUE INDEX IF NOT EXISTS repoid_indx on OrgRepo (repo_id);\nCREATE INDEX IF NOT EXISTS orgid_repoid_indx on OrgRepo (org_id, repo_id);\nCREATE INDEX IF NOT EXISTS orgrepo_orgid_user_indx on OrgRepo (org_id, user);\nCREATE INDEX IF NOT EXISTS orgrepo_user_indx on OrgRepo (user);\nCREATE TABLE IF NOT EXISTS OrgGroupRepo (org_id INTEGER, repo_id CHAR(37), group_id INTEGER, owner VARCHAR(255), permission CHAR(15));\nCREATE UNIQUE INDEX IF NOT EXISTS orgid_groupid_repoid_indx on OrgGroupRepo (org_id, group_id, repo_id);\nCREATE INDEX IF NOT EXISTS org_repoid_index on OrgGroupRepo (repo_id);\nCREATE INDEX IF NOT EXISTS org_owner_indx on OrgGroupRepo (owner);\nCREATE TABLE IF NOT EXISTS OrgInnerPubRepo (org_id INTEGER, repo_id CHAR(37), permission CHAR(15), PRIMARY KEY (org_id, repo_id));\nCREATE TABLE IF NOT EXISTS RepoUserToken (repo_id CHAR(37), email VARCHAR(255), token CHAR(41));\nCREATE UNIQUE INDEX IF NOT EXISTS repo_token_indx on RepoUserToken (repo_id, token);\nCREATE INDEX IF NOT EXISTS repo_token_email_indx on RepoUserToken (email);\nCREATE TABLE IF NOT EXISTS 
RepoTokenPeerInfo (token CHAR(41) PRIMARY KEY, peer_id CHAR(41), peer_ip VARCHAR(50), peer_name VARCHAR(255), sync_time BIGINT, client_ver VARCHAR(20));\nCREATE TABLE IF NOT EXISTS RepoSyncError (token CHAR(41) PRIMARY KEY, error_time BIGINT, error_con VARCHAR(1024));\nCREATE TABLE IF NOT EXISTS RepoHead (repo_id CHAR(37) PRIMARY KEY, branch_name VARCHAR(10));\nCREATE TABLE IF NOT EXISTS RepoSize (repo_id CHAR(37) PRIMARY KEY, size BIGINT UNSIGNED, head_id CHAR(41));\nCREATE TABLE IF NOT EXISTS RepoHistoryLimit (repo_id CHAR(37) PRIMARY KEY, days INTEGER);\nCREATE TABLE IF NOT EXISTS RepoValidSince (repo_id CHAR(37) PRIMARY KEY, timestamp BIGINT);\nCREATE TABLE IF NOT EXISTS WebAP (repo_id CHAR(37) PRIMARY KEY, access_property CHAR(10));\nCREATE TABLE IF NOT EXISTS VirtualRepo (repo_id CHAR(36) PRIMARY KEY, origin_repo CHAR(36), path TEXT, base_commit CHAR(40));\nCREATE INDEX IF NOT EXISTS virtualrepo_origin_repo_idx ON VirtualRepo (origin_repo);\nCREATE TABLE IF NOT EXISTS GarbageRepos (repo_id CHAR(36) PRIMARY KEY);\nCREATE TABLE IF NOT EXISTS RepoTrash (repo_id CHAR(36) PRIMARY KEY, repo_name VARCHAR(255), head_id CHAR(40), owner_id VARCHAR(255), size BIGINT UNSIGNED, org_id INTEGER, del_time BIGINT);\nCREATE INDEX IF NOT EXISTS repotrash_owner_id_idx ON RepoTrash(owner_id);\nCREATE INDEX IF NOT EXISTS repotrash_org_id_idx ON RepoTrash(org_id);\nCREATE TABLE IF NOT EXISTS RepoFileCount (repo_id CHAR(36) PRIMARY KEY, file_count BIGINT UNSIGNED);\nCREATE TABLE IF NOT EXISTS FolderUserPerm (repo_id CHAR(36) NOT NULL, path TEXT NOT NULL, permission CHAR(15), user VARCHAR(255) NOT NULL);\nCREATE INDEX IF NOT EXISTS folder_user_perm_idx ON FolderUserPerm(repo_id);\nCREATE TABLE IF NOT EXISTS FolderGroupPerm (repo_id CHAR(36) NOT NULL, path TEXT NOT NULL, permission CHAR(15), group_id INTEGER NOT NULL);\nCREATE INDEX IF NOT EXISTS folder_group_perm_idx ON FolderGroupPerm(repo_id);\nCREATE TABLE IF NOT EXISTS FolderPermTimestamp (repo_id CHAR(36) PRIMARY KEY, timestamp 
INTEGER);\nCREATE TABLE IF NOT EXISTS WebUploadTempFiles (repo_id CHAR(40) NOT NULL, file_path TEXT NOT NULL, tmp_file_path TEXT NOT NULL);\nCREATE TABLE IF NOT EXISTS RepoInfo (repo_id CHAR(36) PRIMARY KEY, name VARCHAR(255) NOT NULL, update_time INTEGER, version INTEGER, is_encrypted INTEGER, last_modifier VARCHAR(255), status INTEGER DEFAULT 0, type VARCHAR(10));\nCREATE INDEX IF NOT EXISTS RepoInfoTypeIndex on RepoInfo (type);\nCREATE TABLE IF NOT EXISTS RepoStorageId (repo_id CHAR(40) NOT NULL, storage_id VARCHAR(255) NOT NULL);\nCREATE TABLE IF NOT EXISTS UserQuota (user VARCHAR(255) PRIMARY KEY, quota BIGINT);\nCREATE TABLE IF NOT EXISTS UserShareQuota (user VARCHAR(255) PRIMARY KEY, quota BIGINT);\nCREATE TABLE IF NOT EXISTS OrgQuota (org_id INTEGER PRIMARY KEY, quota BIGINT);\nCREATE TABLE IF NOT EXISTS OrgUserQuota (org_id INTEGER, user VARCHAR(255), quota BIGINT, PRIMARY KEY (org_id, user));\nCREATE TABLE IF NOT EXISTS RoleQuota (role VARCHAR(255) PRIMARY KEY, quota BIGINT);\nCREATE TABLE IF NOT EXISTS SeafileConf (cfg_group VARCHAR(255) NOT NULL, cfg_key VARCHAR(255) NOT NULL, value VARCHAR(255), property INTEGER);\nCREATE TABLE IF NOT EXISTS FileLocks (repo_id CHAR(40) NOT NULL, path TEXT NOT NULL, user_name VARCHAR(255) NOT NULL, lock_time BIGINT, expire BIGINT);\nCREATE INDEX IF NOT EXISTS FileLocksIndex ON FileLocks (repo_id);\nCREATE TABLE IF NOT EXISTS FileLockTimestamp (repo_id CHAR(40) PRIMARY KEY, update_time BIGINT NOT NULL);\nCREATE TABLE IF NOT EXISTS SharedRepo (repo_id CHAR(37) , from_email VARCHAR(255), to_email VARCHAR(255), permission CHAR(15));\nCREATE INDEX IF NOT EXISTS RepoIdIndex on SharedRepo (repo_id);\nCREATE INDEX IF NOT EXISTS FromEmailIndex on SharedRepo (from_email);\nCREATE INDEX IF NOT EXISTS ToEmailIndex on SharedRepo (to_email);\nCREATE TABLE IF NOT EXISTS OrgSharedRepo (org_id INTEGER, repo_id CHAR(37) , from_email VARCHAR(255), to_email VARCHAR(255), permission CHAR(15));\nCREATE INDEX IF NOT EXISTS OrgRepoIdIndex on 
OrgSharedRepo (org_id, repo_id);\nCREATE INDEX IF NOT EXISTS OrgFromEmailIndex on OrgSharedRepo (from_email);\nCREATE INDEX IF NOT EXISTS OrgToEmailIndex on OrgSharedRepo (to_email);\nCREATE INDEX IF NOT EXISTS OrgLibIdIndex on OrgSharedRepo (repo_id);\nCREATE TABLE IF NOT EXISTS SystemInfo (info_key VARCHAR(256), info_value VARCHAR(1024));\n"
  },
  {
    "path": "scripts/sql/sqlite/user.sql",
    "content": "CREATE TABLE IF NOT EXISTS Binding (email TEXT, peer_id TEXT);\nCREATE UNIQUE INDEX IF NOT EXISTS peer_index on Binding (peer_id);\n\nCREATE TABLE IF NOT EXISTS EmailUser (id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, email TEXT, passwd TEXT, is_staff bool NOT NULL, is_active bool NOT NULL, ctime INTEGER, reference_id TEXT);\nCREATE UNIQUE INDEX IF NOT EXISTS email_index on EmailUser (email);\nCREATE UNIQUE INDEX IF NOT EXISTS reference_id_index on EmailUser (reference_id);\n\nCREATE TABLE IF NOT EXISTS LDAPConfig (cfg_group VARCHAR(255) NOT NULL, cfg_key VARCHAR(255) NOT NULL, value VARCHAR(255), property INTEGER);\n\nCREATE TABLE IF NOT EXISTS LDAPUsers (id INTEGER PRIMARY KEY AUTOINCREMENT, email TEXT NOT NULL, password TEXT NOT NULL, is_staff BOOL NOT NULL, is_active BOOL NOT NULL, extra_attrs TEXT, reference_id TEXT);\nCREATE UNIQUE INDEX IF NOT EXISTS ldapusers_email_index on LDAPUsers(email);\nCREATE UNIQUE INDEX IF NOT EXISTS ldapusers_reference_id_index on LDAPUsers(reference_id);\n\nCREATE TABLE IF NOT EXISTS UserRole (email TEXT, role TEXT, is_manual_set INTEGER DEFAULT 0);\nCREATE INDEX IF NOT EXISTS userrole_email_index on UserRole (email);\nCREATE UNIQUE INDEX IF NOT EXISTS userrole_userrole_index on UserRole (email, role);\n"
  },
  {
    "path": "server/Makefile.am",
    "content": "SUBDIRS = gc\n\nAM_CFLAGS = -DPKGDATADIR=\\\"$(pkgdatadir)\\\" \\\n\t-DPACKAGE_DATA_DIR=\\\"\"$(pkgdatadir)\"\\\" \\\n\t-DSEAFILE_SERVER \\\n\t-DFULL_FEATURE \\\n\t-I$(top_srcdir)/include \\\n\t-I$(top_srcdir)/lib \\\n\t-I$(top_builddir)/lib \\\n\t-I$(top_srcdir)/common \\\n\t@SEARPC_CFLAGS@ \\\n\t@GLIB2_CFLAGS@ \\\n\t@MSVC_CFLAGS@ \\\n\t@LIBARCHIVE_CFLAGS@ \\\n\t@MYSQL_CFLAGS@ \\\n\t@LIBHIREDIS_CFLAGS@ \\\n\t-Wall\n\nbin_PROGRAMS = seaf-server\n\nnoinst_HEADERS = web-accesstoken-mgr.h  seafile-session.h \\\n\trepo-mgr.h \\\n\tshare-mgr.h \\\n\tpasswd-mgr.h \\\n\tquota-mgr.h \\\n\tsize-sched.h \\\n\tcopy-mgr.h \\\n\thttp-server.h \\\n\tupload-file.h \\\n\taccess-file.h \\\n\tpack-dir.h \\\n\tfileserver-config.h \\\n\thttp-status-codes.h \\\n\tzip-download-mgr.h \\\n\t../common/user-mgr.h \\\n\t../common/group-mgr.h \\\n\t../common/org-mgr.h \\\n\tindex-blocks-mgr.h \\\n\thttp-tx-mgr.h \\\n\tnotif-mgr.h \\\n\tchange-set.h \\\n\tmetric-mgr.h\n\nseaf_server_SOURCES = \\\n\tseaf-server.c \\\n\tweb-accesstoken-mgr.c  seafile-session.c \\\n\tzip-download-mgr.c \\\n\tindex-blocks-mgr.c \\\n\tshare-mgr.c \\\n\tpasswd-mgr.c \\\n\tquota-mgr.c \\\n\trepo-op.c \\\n\trepo-perm.c \\\n\tsize-sched.c \\\n\tvirtual-repo.c \\\n\tcopy-mgr.c \\\n\thttp-server.c \\\n\tupload-file.c \\\n\taccess-file.c \\\n\tpack-dir.c \\\n\tfileserver-config.c \\\n\thttp-tx-mgr.c \\\n\tnotif-mgr.c \\\n\tchange-set.c \\\n\tmetric-mgr.c \\\n\t../common/seaf-db.c \\\n\t../common/branch-mgr.c ../common/fs-mgr.c \\\n\t../common/config-mgr.c \\\n\trepo-mgr.c ../common/commit-mgr.c \\\n\t../common/log.c ../common/object-list.c \\\n\t../common/rpc-service.c \\\n\t../common/vc-common.c \\\n\t../common/seaf-utils.c \\\n\t../common/obj-store.c \\\n\t../common/obj-backend-fs.c \\\n\t../common/seafile-crypt.c \\\n\t../common/password-hash.c \\\n\t../common/diff-simple.c \\\n\t../common/mq-mgr.c \\\n\t../common/user-mgr.c \\\n\t../common/group-mgr.c \\\n\t../common/org-mgr.c 
\\\n\t../common/block-mgr.c \\\n\t../common/block-backend.c \\\n\t../common/block-backend-fs.c \\\n\t../common/merge-new.c \\\n\t../common/obj-cache.c \\\n\t../common/redis-cache.c \\\n\t../common/block-tx-utils.c\n\nseaf_server_LDADD = $(top_builddir)/lib/libseafile_common.la \\\n\t@GLIB2_LIBS@ @GOBJECT_LIBS@ @SSL_LIBS@ @LIB_RT@ @LIB_UUID@ -lsqlite3 @LIBEVENT_LIBS@ @EVHTP_LIBS@ \\\n\t$(top_builddir)/common/cdc/libcdc.la \\\n\t@SEARPC_LIBS@ @JANSSON_LIBS@ ${LIB_WS32} @ZLIB_LIBS@ \\\n\t@LIBARCHIVE_LIBS@ @LIB_ICONV@ \\\n\t@MYSQL_LIBS@ -lsqlite3 \\\n\t@CURL_LIBS@ @JWT_LIBS@ @LIBHIREDIS_LIBS@ @ARGON2_LIBS@\n"
  },
  {
    "path": "server/access-file.c",
    "content": "#include \"common.h\"\n\n#ifdef HAVE_EVHTP\n#define DEBUG_FLAG SEAFILE_DEBUG_HTTP\n#include \"log.h\"\n\n#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)\n#include <event2/event.h>\n#include <event2/bufferevent.h>\n#include <event2/bufferevent_struct.h>\n#else\n#include <event.h>\n#endif\n\n#include <evhtp.h>\n\n#include <sys/stat.h>\n#include <sys/types.h>\n#include <fcntl.h>\n\n#include \"seafile-object.h\"\n#include \"seafile-crypt.h\"\n\n#include \"utils.h\"\n\n#include \"seafile-session.h\"\n#include \"access-file.h\"\n#include \"zip-download-mgr.h\"\n#include \"http-server.h\"\n#include \"seaf-utils.h\"\n\n#define FILE_TYPE_MAP_DEFAULT_LEN 1\n#define BUFFER_SIZE 1024 * 64\n\nstruct file_type_map {\n    char *suffix;\n    char *type;\n};\n\ntypedef struct SendBlockData {\n    evhtp_request_t *req;\n    char *block_id;\n    BlockHandle *handle;\n    uint32_t bsize;\n    uint32_t remain;\n\n    char store_id[37];\n    int repo_version;\n\n    char *user;\n\n    bufferevent_data_cb saved_read_cb;\n    bufferevent_data_cb saved_write_cb;\n    bufferevent_event_cb saved_event_cb;\n    void *saved_cb_arg;\n} SendBlockData;\n\ntypedef struct SendfileData {\n    evhtp_request_t *req;\n    Seafile *file;\n    SeafileCrypt *crypt;\n    gboolean enc_init;\n    EVP_CIPHER_CTX *ctx;\n    BlockHandle *handle;\n    size_t remain;\n    int idx;\n\n    char store_id[37];\n    int repo_version;\n\n    char *user;\n    char *token_type;\n\n    bufferevent_data_cb saved_read_cb;\n    bufferevent_data_cb saved_write_cb;\n    bufferevent_event_cb saved_event_cb;\n    void *saved_cb_arg;\n} SendfileData;\n\ntypedef struct SendFileRangeData {\n    evhtp_request_t *req;\n    Seafile *file;\n    BlockHandle *handle;\n    int blk_idx;\n    guint64 start_off;\n    guint64 range_remain;\n\n    char store_id[37];\n    int repo_version;\n\n    char *user;\n    char *token_type;\n\n    bufferevent_data_cb saved_read_cb;\n    bufferevent_data_cb 
saved_write_cb;\n    bufferevent_event_cb saved_event_cb;\n    void *saved_cb_arg;\n} SendFileRangeData;\n\ntypedef struct SendDirData {\n    evhtp_request_t *req;\n    size_t remain;\n    guint64 total_size;\n\n    int zipfd;\n    char *zipfile;\n    char *token;\n    char *user;\n    char *token_type;\n    char repo_id[37];\n\n    bufferevent_data_cb saved_read_cb;\n    bufferevent_data_cb saved_write_cb;\n    bufferevent_event_cb saved_event_cb;\n    void *saved_cb_arg;\n} SendDirData;\n\n\n\nextern SeafileSession *seaf;\n\nstatic struct file_type_map ftmap[] = {\n    { \"txt\", \"text/plain\" },\n    { \"doc\", \"application/vnd.ms-word\" },\n    { \"docx\", \"application/vnd.openxmlformats-officedocument.wordprocessingml.document\" },\n    { \"ppt\", \"application/vnd.ms-powerpoint\" },\n    { \"pptx\", \"application/vnd.openxmlformats-officedocument.presentationml.presentation\" },\n    { \"xls\", \"application/vnd.ms-excel\" },\n    { \"xlsx\", \"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\" },\n    { \"pdf\", \"application/pdf\" },\n    { \"zip\", \"application/zip\"},\n    { \"mp3\", \"audio/mp3\" },\n    { \"mpeg\", \"video/mpeg\" },\n    { \"mp4\", \"video/mp4\" },\n    { \"ogv\", \"video/ogg\" },\n    { \"mov\", \"video/mp4\" },\n    { \"webm\", \"video/webm\" },\n    { \"mkv\", \"video/x-matroska\" },\n    { \"jpg\", \"image/jpeg\" },\n    { \"JPG\", \"image/jpeg\" },\n    { \"jpeg\", \"image/jpeg\" },\n    { \"JPEG\", \"image/jpeg\" },\n    { \"png\", \"image/png\" },\n    { \"PNG\", \"image/png\" },\n    { \"gif\", \"image/gif\" },\n    { \"GIF\", \"image/gif\" },\n    { \"svg\", \"image/svg+xml\" },\n    { \"SVG\", \"image/svg+xml\" },\n    { \"heic\", \"image/heic\" },\n    { \"ico\", \"image/x-icon\" },\n    { \"bmp\", \"image/bmp\" },\n    { \"tif\", \"image/tiff\" },\n    { \"tiff\", \"image/tiff\" },\n    { \"psd\", \"image/vnd.adobe.photoshop\" },\n    { \"webp\", \"image/webp\" },\n    { \"jfif\", \"image/jpeg\" },\n    
{ NULL, NULL },\n};\n\nstatic void\nfree_sendblock_data (SendBlockData *data)\n{\n    if (data->handle) {\n        seaf_block_manager_close_block(seaf->block_mgr, data->handle);\n        seaf_block_manager_block_handle_free(seaf->block_mgr, data->handle);\n    }\n\n    g_free (data->block_id);\n    g_free (data->user);\n    g_free (data);\n}\n\nstatic void\nfree_sendfile_data (SendfileData *data)\n{\n    if (data->handle) {\n        seaf_block_manager_close_block(seaf->block_mgr, data->handle);\n        seaf_block_manager_block_handle_free(seaf->block_mgr, data->handle);\n    }\n\n    if (data->enc_init)\n        EVP_CIPHER_CTX_free (data->ctx);\n\n    seafile_unref (data->file);\n    g_free (data->user);\n    g_free (data->token_type);\n    g_free (data->crypt);\n    g_free (data);\n}\n\nstatic void\nfree_send_file_range_data (SendFileRangeData *data)\n{\n    if (data->handle) {\n        seaf_block_manager_close_block(seaf->block_mgr, data->handle);\n        seaf_block_manager_block_handle_free(seaf->block_mgr, data->handle);\n    }\n\n    seafile_unref (data->file);\n    g_free (data->user);\n    g_free (data->token_type);\n    g_free (data);\n}\n\nstatic void\nfree_senddir_data (SendDirData *data)\n{\n    close (data->zipfd);\n\n    zip_download_mgr_del_zip_progress (seaf->zip_download_mgr, data->token);\n\n    g_free (data->user);\n    g_free (data->token_type);\n    g_free (data->token);\n    g_free (data);\n}\n\nstatic void\nwrite_block_data_cb (struct bufferevent *bev, void *ctx)\n{\n    SendBlockData *data = ctx;\n    char *blk_id;\n    BlockHandle *handle;\n    char buf[1024 * 64];\n    int n;\n\n    blk_id = data->block_id;\n\n    if (!data->handle) {\n        data->handle = seaf_block_manager_open_block(seaf->block_mgr,\n                                                     data->store_id,\n                                                     data->repo_version,\n                                                     blk_id, BLOCK_READ);\n        if 
(!data->handle) {\n            seaf_warning (\"Failed to open block %s:%s\\n\", data->store_id, blk_id);\n            goto err;\n        }\n\n        data->remain = data->bsize;\n    }\n    handle = data->handle;\n\n    n = seaf_block_manager_read_block(seaf->block_mgr, handle, buf, sizeof(buf));\n    data->remain -= n;\n    if (n < 0) {\n        seaf_warning (\"Error when reading from block %s:%s.\\n\",\n                      data->store_id, blk_id);\n        goto err;\n    } else if (n == 0) {\n        /* We've read up the data of this block, finish. */\n        seaf_block_manager_close_block (seaf->block_mgr, handle);\n        seaf_block_manager_block_handle_free (seaf->block_mgr, handle);\n        data->handle = NULL;\n\n        /* Recover evhtp's callbacks */\n        bev->readcb = data->saved_read_cb;\n        bev->writecb = data->saved_write_cb;\n        bev->errorcb = data->saved_event_cb;\n        bev->cbarg = data->saved_cb_arg;\n\n        /* Resume reading incomming requests. */\n        evhtp_request_resume (data->req);\n\n        evhtp_send_reply_end (data->req);\n\n        send_statistic_msg (data->store_id, data->user, \"web-file-download\", (guint64)data->bsize);\n\n        free_sendblock_data (data);\n        return;\n    }\n\n    /* OK, we've got some data to send. 
*/\n    bufferevent_write (bev, buf, n);\n\n    return;\n\nerr:\n    evhtp_connection_free (evhtp_request_get_connection (data->req));\n    free_sendblock_data (data);\n    return;\n}\n\nstatic void\nwrite_data_cb (struct bufferevent *bev, void *ctx)\n{\n    SendfileData *data = ctx;\n    char *blk_id;\n    BlockHandle *handle;\n    char buf[1024 * 64];\n    int n;\n\nnext:\n    blk_id = data->file->blk_sha1s[data->idx];\n\n    if (!data->handle) {\n        data->handle = seaf_block_manager_open_block(seaf->block_mgr,\n                                                     data->store_id,\n                                                     data->repo_version,\n                                                     blk_id, BLOCK_READ);\n        if (!data->handle) {\n            seaf_warning (\"Failed to open block %s:%s\\n\", data->store_id, blk_id);\n            goto err;\n        }\n\n        BlockMetadata *bmd;\n        bmd = seaf_block_manager_stat_block_by_handle (seaf->block_mgr,\n                                                       data->handle);\n        if (!bmd)\n            goto err;\n        data->remain = bmd->size;\n        g_free (bmd);\n\n        if (data->crypt) {\n            if (seafile_decrypt_init (&data->ctx,\n                                      data->crypt->version,\n                                      (unsigned char *)data->crypt->key,\n                                      (unsigned char *)data->crypt->iv) < 0) {\n                seaf_warning (\"Failed to init decrypt.\\n\");\n                goto err;\n            }\n            data->enc_init = TRUE;\n        }\n    }\n    handle = data->handle;\n\n    n = seaf_block_manager_read_block(seaf->block_mgr, handle, buf, sizeof(buf));\n    data->remain -= n;\n    if (n < 0) {\n        seaf_warning (\"Error when reading from block %s.\\n\", blk_id);\n        goto err;\n    } else if (n == 0) {\n        /* We've read up the data of this block, finish or try next block. 
*/\n        seaf_block_manager_close_block (seaf->block_mgr, handle);\n        seaf_block_manager_block_handle_free (seaf->block_mgr, handle);\n        data->handle = NULL;\n        if (data->crypt != NULL) {\n            EVP_CIPHER_CTX_free (data->ctx);\n            data->enc_init = FALSE;\n        }\n\n        if (data->idx == data->file->n_blocks - 1) {\n            /* Recover evhtp's callbacks */\n            bev->readcb = data->saved_read_cb;\n            bev->writecb = data->saved_write_cb;\n            bev->errorcb = data->saved_event_cb;\n            bev->cbarg = data->saved_cb_arg;\n\n            /* Resume reading incomming requests. */\n            evhtp_request_resume (data->req);\n\n            evhtp_send_reply_end (data->req);\n\n            char *oper = \"web-file-download\";\n            if (g_strcmp0(data->token_type, \"download-link\") == 0)\n                oper = \"link-file-download\";\n\n            send_statistic_msg(data->store_id, data->user, oper,\n                               (guint64)data->file->file_size);\n\n            free_sendfile_data (data);\n            return;\n        }\n\n        ++(data->idx);\n        goto next;\n    }\n\n    /* OK, we've got some data to send. 
*/\n    if (data->crypt != NULL) {\n        char *dec_out;\n        int dec_out_len = -1;\n        struct evbuffer *tmp_buf;\n\n        dec_out = g_new (char, n + 16);\n        if (!dec_out) {\n            seaf_warning (\"Failed to alloc memory.\\n\");\n            goto err;\n        }\n\n        int ret = EVP_DecryptUpdate (data->ctx,\n                                     (unsigned char *)dec_out,\n                                     &dec_out_len,\n                                     (unsigned char *)buf,\n                                     n);\n        if (ret == 0) {\n            seaf_warning (\"Decrypt block %s:%s failed.\\n\", data->store_id, blk_id);\n            g_free (dec_out);\n            goto err;\n        }\n\n        tmp_buf = evbuffer_new ();\n\n        evbuffer_add (tmp_buf, dec_out, dec_out_len);\n\n        /* If it's the last piece of a block, call decrypt_final()\n         * to decrypt the possible partial block. */\n        if (data->remain == 0) {\n            ret = EVP_DecryptFinal_ex (data->ctx,\n                                       (unsigned char *)dec_out,\n                                       &dec_out_len);\n            if (ret == 0) {\n                seaf_warning (\"Decrypt block %s:%s failed.\\n\", data->store_id, blk_id);\n                evbuffer_free (tmp_buf);\n                g_free (dec_out);\n                goto err;\n            }\n            evbuffer_add (tmp_buf, dec_out, dec_out_len);\n        }\n        /* This may call write_data_cb() recursively (by libevent_openssl).\n         * SendfileData struct may be free'd in the recursive calls.\n         * So don't use \"data\" variable after here.\n         */\n        bufferevent_write_buffer (bev, tmp_buf);\n\n        evbuffer_free (tmp_buf);\n        g_free (dec_out);\n    } else {\n        bufferevent_write (bev, buf, n);\n    }\n\n    return;\n\nerr:\n    evhtp_connection_free (evhtp_request_get_connection (data->req));\n    free_sendfile_data (data);\n    
return;\n}\n\nstatic void\nwrite_dir_data_cb (struct bufferevent *bev, void *ctx)\n{\n    SendDirData *data = ctx;\n    char buf[64 * 1024];\n    int n;\n\n    n = readn (data->zipfd, buf, sizeof(buf));\n    if (n < 0) {\n        seaf_warning (\"Failed to read zipfile %s: %s.\\n\", data->zipfile, strerror (errno));\n        evhtp_connection_free (evhtp_request_get_connection (data->req));\n        free_senddir_data (data);\n    } else if (n > 0) {\n        bufferevent_write (bev, buf, n);\n        data->remain -= n;\n\n        if (data->remain == 0) {\n            /* Recover evhtp's callbacks */\n            bev->readcb = data->saved_read_cb;\n            bev->writecb = data->saved_write_cb;\n            bev->errorcb = data->saved_event_cb;\n            bev->cbarg = data->saved_cb_arg;\n\n            /* Resume reading incomming requests. */\n            evhtp_request_resume (data->req);\n\n            evhtp_send_reply_end (data->req);\n\n            char *oper = \"web-file-download\";\n            if (g_strcmp0(data->token_type, \"download-dir-link\") == 0 ||\n                g_strcmp0(data->token_type, \"download-multi-link\") == 0)\n                oper = \"link-file-download\";\n\n            send_statistic_msg(data->repo_id, data->user, oper, data->total_size);\n\n            free_senddir_data (data);\n            return;\n        }\n    }\n}\n\nstatic void\nmy_block_event_cb (struct bufferevent *bev, short events, void *ctx)\n{\n    SendBlockData *data = ctx;\n\n    data->saved_event_cb (bev, events, data->saved_cb_arg);\n\n    /* Free aux data. */\n    free_sendblock_data (data);\n}\n\nstatic void\nmy_event_cb (struct bufferevent *bev, short events, void *ctx)\n{\n    SendfileData *data = ctx;\n\n    data->saved_event_cb (bev, events, data->saved_cb_arg);\n\n    /* Free aux data. 
*/\n    free_sendfile_data (data);\n}\n\nstatic void\nfile_range_event_cb (struct bufferevent *bev, short events, void *ctx)\n{\n    SendFileRangeData *data = ctx;\n\n    data->saved_event_cb (bev, events, data->saved_cb_arg);\n\n    /* Free aux data. */\n    free_send_file_range_data (data);\n}\n\nstatic void\nmy_dir_event_cb (struct bufferevent *bev, short events, void *ctx)\n{\n    SendDirData *data = ctx;\n\n    data->saved_event_cb (bev, events, data->saved_cb_arg);\n\n    /* Free aux data. */\n    free_senddir_data (data);\n}\n\nstatic char *\nparse_content_type(const char *filename)\n{\n    char *p;\n    int i;\n\n    if ((p = strrchr(filename, '.')) == NULL)\n        return NULL;\n    p++;\n\n    char *lower = g_utf8_strdown (p, strlen(p));\n\n    for (i = 0; ftmap[i].suffix != NULL; i++) {\n        if (strcmp(lower, ftmap[i].suffix) == 0) {\n            g_free (lower);\n            return ftmap[i].type;\n        }\n    }\n\n    g_free (lower);\n    return NULL;\n}\n\nstatic gboolean\ntest_firefox (evhtp_request_t *req)\n{\n    const char *user_agent = evhtp_header_find (req->headers_in, \"User-Agent\");\n    if (!user_agent)\n        return FALSE;\n\n    GString *s = g_string_new (user_agent);\n    if (g_strrstr (g_string_ascii_down (s)->str, \"firefox\")) {\n        g_string_free (s, TRUE);\n        return TRUE;\n    }\n    else {\n        g_string_free (s, TRUE);\n        return FALSE;\n    }\n}\n\nstatic int\ndo_file(evhtp_request_t *req, SeafRepo *repo, const char *file_id,\n        const char *filename, const char *operation,\n        SeafileCryptKey *crypt_key, const char *user)\n{\n    Seafile *file;\n    char *type = NULL;\n    char file_size[255];\n    gchar *content_type = NULL;\n    char cont_filename[SEAF_PATH_MAX];\n    char *key_hex, *iv_hex;\n    unsigned char enc_key[32], enc_iv[16];\n    SeafileCrypt *crypt = NULL;\n    SendfileData *data;\n    char *policy = \"sandbox\";\n\n    file = seaf_fs_manager_get_seafile(seaf->fs_mgr,\n            
                           repo->store_id, repo->version, file_id);\n    if (file == NULL)\n        return -1;\n\n    if (crypt_key != NULL) {\n        g_object_get (crypt_key,\n                      \"key\", &key_hex,\n                      \"iv\", &iv_hex,\n                      NULL);\n        if (repo->enc_version == 1)\n            hex_to_rawdata (key_hex, enc_key, 16);\n        else\n            hex_to_rawdata (key_hex, enc_key, 32);\n        hex_to_rawdata (iv_hex, enc_iv, 16);\n        crypt = seafile_crypt_new (repo->enc_version, enc_key, enc_iv);\n        g_free (key_hex);\n        g_free (iv_hex);\n    }\n\n    evhtp_headers_add_header(req->headers_out,\n                             evhtp_header_new(\"Access-Control-Allow-Origin\",\n                                              \"*\", 1, 1));\n\n    type = parse_content_type(filename);\n    if (type != NULL) {\n        if (strstr(type, \"text\")) {\n            content_type = g_strjoin(\"; \", type, \"charset=gbk\", NULL);\n        } else {\n            content_type = g_strdup (type);\n        }\n\n        evhtp_headers_add_header(req->headers_out,\n                                 evhtp_header_new(\"Content-Type\",\n                                                  content_type, 1, 1));\n        g_free (content_type);\n\n        if (g_strcmp0 (type, \"image/svg+xml\") == 0) {\n            evhtp_headers_add_header(req->headers_out,\n                                     evhtp_header_new(\"Content-Security-Policy\",\n                                                      policy, 1, 1));\n        }\n    } else\n        evhtp_headers_add_header (req->headers_out,\n                                  evhtp_header_new(\"Content-Type\",\n                                                   \"application/octet-stream\", 1, 1));\n\n    snprintf(file_size, sizeof(file_size), \"%\"G_GINT64_FORMAT\"\", file->file_size);\n    evhtp_headers_add_header (req->headers_out,\n                              
evhtp_header_new(\"Content-Length\", file_size, 1, 1));\n\n    char *esc_filename = g_uri_escape_string(filename, NULL, FALSE);\n    if (strcmp(operation, \"download\") == 0 ||\n        strcmp(operation, \"download-link\") == 0) {\n        /* Safari doesn't support 'utf8', 'utf-8' is compatible with most of browsers. */\n        snprintf(cont_filename, SEAF_PATH_MAX,\n                 \"attachment;filename*=utf-8''%s;filename=\\\"%s\\\"\", esc_filename, filename);\n    } else {\n        snprintf(cont_filename, SEAF_PATH_MAX,\n                 \"inline;filename*=utf-8''%s;filename=\\\"%s\\\"\", esc_filename, filename);\n    }\n    g_free (esc_filename);\n    evhtp_headers_add_header(req->headers_out,\n                             evhtp_header_new(\"Content-Disposition\", cont_filename,\n                                              1, 1));\n\n    if (g_strcmp0 (type, \"image/jpg\") != 0) {\n        evhtp_headers_add_header(req->headers_out,\n                                 evhtp_header_new(\"X-Content-Type-Options\", \"nosniff\",\n                                                  1, 1));\n    }\n    /* HEAD Request */\n    if (evhtp_request_get_method(req) == htp_method_HEAD) {\n        evhtp_send_reply (req, EVHTP_RES_OK);\n        seafile_unref (file);\n        g_free (crypt);\n        return 0;\n    }\n\n    /* If it's an empty file, send an empty reply. 
*/\n    if (file->n_blocks == 0) {\n        evhtp_send_reply (req, EVHTP_RES_OK);\n        seafile_unref (file);\n        g_free (crypt);\n        return 0;\n    }\n\n    data = g_new0 (SendfileData, 1);\n    data->req = req;\n    data->file = file;\n    data->crypt = crypt;\n    data->user = g_strdup(user);\n    data->token_type = g_strdup (operation);\n\n    memcpy (data->store_id, repo->store_id, 36);\n    data->repo_version = repo->version;\n\n    /* We need to overwrite evhtp's callback functions to\n     * write file data piece by piece.\n     */\n    struct bufferevent *bev = evhtp_request_get_bev (req);\n    data->saved_read_cb = bev->readcb;\n    data->saved_write_cb = bev->writecb;\n    data->saved_event_cb = bev->errorcb;\n    data->saved_cb_arg = bev->cbarg;\n    bufferevent_setcb (bev,\n                       NULL,\n                       write_data_cb,\n                       my_event_cb,\n                       data);\n    /* Block any new request from this connection before finish\n     * handling this request.\n     */\n    evhtp_request_pause (req);\n\n    /* Kick start data transfer by sending out http headers. 
*/\n    evhtp_send_reply_start(req, EVHTP_RES_OK);\n\n    return 0;\n}\n\n// get block handle for range start\nstatic BlockHandle *\nget_start_block_handle (const char *store_id, int version, Seafile *file,\n                        guint64 start, int *blk_idx)\n{\n    BlockHandle *handle = NULL;\n    BlockMetadata *bmd;\n    char *blkid;\n    guint64 tolsize = 0;\n    int i = 0;\n\n    for (; i < file->n_blocks; i++) {\n        blkid = file->blk_sha1s[i];\n\n        bmd = seaf_block_manager_stat_block(seaf->block_mgr, store_id,\n                                            version, blkid);\n        if (!bmd)\n            return NULL;\n\n        if (start < tolsize + bmd->size) {\n            g_free (bmd);\n            break;\n        }\n        tolsize += bmd->size;\n        g_free (bmd);\n    }\n\n    /* beyond the file size */\n    if (i == file->n_blocks)\n        return NULL;\n\n    handle = seaf_block_manager_open_block(seaf->block_mgr,\n                                           store_id, version,\n                                           blkid, BLOCK_READ);\n    if (!handle) {\n        seaf_warning (\"Failed to open block %s:%s.\\n\", store_id, blkid);\n        return NULL;\n    }\n\n    /* trim the offset in a block */\n    if (start > tolsize) {\n        char *tmp = (char *)malloc(sizeof(*tmp) * (start - tolsize));\n        if (!tmp)\n            goto err;\n\n        int n = seaf_block_manager_read_block(seaf->block_mgr, handle,\n                                              tmp, start-tolsize);\n        if (n != start-tolsize) {\n            seaf_warning (\"Failed to read block %s:%s.\\n\", store_id, blkid);\n            free (tmp);\n            goto err;\n        }\n        free (tmp);\n    }\n\n    *blk_idx = i;\n    return handle;\n\nerr:\n    seaf_block_manager_close_block(seaf->block_mgr, handle);\n    seaf_block_manager_block_handle_free (seaf->block_mgr, handle);\n    return NULL;\n}\n\nstatic void\nfinish_file_range_request (struct bufferevent 
*bev, SendFileRangeData *data)\n{\n    /* Recover evhtp's callbacks */\n    bev->readcb = data->saved_read_cb;\n    bev->writecb = data->saved_write_cb;\n    bev->errorcb = data->saved_event_cb;\n    bev->cbarg = data->saved_cb_arg;\n\n    /* Resume reading incomming requests. */\n    evhtp_request_resume (data->req);\n\n    evhtp_send_reply_end (data->req);\n\n    free_send_file_range_data (data);\n}\n\nstatic void\nwrite_file_range_cb (struct bufferevent *bev, void *ctx)\n{\n    SendFileRangeData *data = ctx;\n    char *blk_id;\n    char buf[BUFFER_SIZE];\n    int bsize;\n    int n;\n\n    if (data->blk_idx == -1) {\n        // start to send block\n        data->handle = get_start_block_handle (data->store_id, data->repo_version,\n                                               data->file, data->start_off,\n                                               &data->blk_idx);\n        if (!data->handle)\n            goto err;\n    }\n\nnext:\n    blk_id = data->file->blk_sha1s[data->blk_idx];\n\n    if (!data->handle) {\n        data->handle = seaf_block_manager_open_block(seaf->block_mgr,\n                                                     data->store_id,\n                                                     data->repo_version,\n                                                     blk_id, BLOCK_READ);\n        if (!data->handle) {\n            seaf_warning (\"Failed to open block %s:%s\\n\", data->store_id, blk_id);\n            goto err;\n        }\n    }\n\n    bsize = data->range_remain < BUFFER_SIZE ? 
data->range_remain : BUFFER_SIZE;\n    n = seaf_block_manager_read_block(seaf->block_mgr, data->handle, buf, bsize);\n    data->range_remain -= n;\n    if (n < 0) {\n        seaf_warning (\"Error when reading from block %s:%s.\\n\",\n                      data->store_id, blk_id);\n        goto err;\n    } else if (n == 0) {\n        seaf_block_manager_close_block (seaf->block_mgr, data->handle);\n        seaf_block_manager_block_handle_free (seaf->block_mgr, data->handle);\n        data->handle = NULL;\n        ++data->blk_idx;\n        goto next;\n    }\n\n    bufferevent_write (bev, buf, n);\n    if (data->range_remain == 0) {\n        if (data->start_off + n >= data->file->file_size) {\n            char *oper = \"web-file-download\";\n            if (g_strcmp0(data->token_type, \"download-link\") == 0)\n                oper = \"link-file-download\";\n\n            send_statistic_msg (data->store_id, data->user, oper,\n                                (guint64)data->file->file_size);\n        }\n        finish_file_range_request (bev, data);\n    }\n\n    return;\n\nerr:\n    evhtp_connection_free (evhtp_request_get_connection (data->req));\n    free_send_file_range_data (data);\n}\n\n// parse range offset, only support single range (-num, num-num, num-)\nstatic gboolean\nparse_range_val (const char *byte_ranges, guint64 *pstart, guint64 *pend,\n                 guint64 fsize)\n{\n    char *ranges = strchr(byte_ranges, '=');\n    if (!ranges) {\n        return FALSE;\n    }\n\n    char *minus;\n    char *end_ptr;\n    gboolean error = FALSE;\n    char *ranges_dup = g_strdup (ranges + 1);\n    char *tmp = ranges_dup;\n    guint64 start;\n    guint64 end;\n\n    minus = strchr(tmp, '-');\n    if (!minus)\n        return FALSE;\n\n    if (minus == tmp) {\n        // -num mode\n        start = strtoll(tmp, &end_ptr, 10);\n        if (start == 0) {\n            // range format is invalid\n            error = TRUE;\n        } else if (*end_ptr == '\\0') {\n            
end = fsize - 1;\n            start += fsize;\n        } else {\n            error = TRUE;\n        }\n    } else if (*(minus + 1) == '\\0') {\n        // num- mode\n        start = strtoll(tmp, &end_ptr, 10);\n        if (end_ptr == minus) {\n            end = fsize - 1;\n        } else {\n            error = TRUE;\n        }\n    } else {\n        // num-num mode\n        start = strtoll(tmp, &end_ptr, 10);\n        if (end_ptr == minus) {\n            end = strtoll(minus + 1, &end_ptr, 10);\n            if (*end_ptr != '\\0') {\n                error = TRUE;\n            }\n        } else {\n            error = TRUE;\n        }\n    }\n\n    g_free (ranges_dup);\n\n    if (error)\n        return FALSE;\n\n    if (end > fsize - 1) {\n        end = fsize - 1;\n    }\n    if (start > end) {\n        // Range format is valid, but range number is invalid\n        return FALSE;\n    }\n\n    *pstart = start;\n    *pend = end;\n\n    return TRUE;\n}\n\nstatic void\nset_resp_disposition (evhtp_request_t *req, const char *operation,\n                      const char *filename)\n{\n    char *cont_filename = NULL;\n    char *esc_filename = g_uri_escape_string(filename, NULL, FALSE);\n\n    if (strcmp(operation, \"download\") == 0) {\n        cont_filename = g_strdup_printf(\"attachment;filename*=utf-8''%s;filename=\\\"%s\\\"\",\n                                        esc_filename, filename);\n    } else {\n        cont_filename = g_strdup_printf(\"inline;filename*=utf-8''%s;filename=\\\"%s\\\"\",\n                                        esc_filename, filename);\n    }\n\n    evhtp_headers_add_header(req->headers_out,\n                             evhtp_header_new(\"Content-Disposition\", cont_filename,\n                                              0, 1));\n    g_free (esc_filename);\n    g_free (cont_filename);\n}\n\nstatic int\ndo_file_range (evhtp_request_t *req, SeafRepo *repo, const char *file_id,\n               const char *filename, const char *operation, const 
char *byte_ranges,\n               const char *user)\n{\n    Seafile *file;\n    SendFileRangeData *data = NULL;\n    guint64 start;\n    guint64 end;\n    char *policy = \"sandbox\";\n\n    file = seaf_fs_manager_get_seafile(seaf->fs_mgr,\n                                       repo->store_id, repo->version, file_id);\n    if (file == NULL)\n        return -1;\n\n    /* If it's an empty file, send an empty reply. */\n    if (file->n_blocks == 0) {\n        evhtp_send_reply (req, EVHTP_RES_OK);\n        seafile_unref (file);\n        return 0;\n    }\n\n    if (!parse_range_val (byte_ranges, &start, &end, file->file_size)) {\n        seafile_unref (file);\n        char *con_range = g_strdup_printf (\"bytes */%\"G_GUINT64_FORMAT, file->file_size);\n        evhtp_headers_add_header (req->headers_out,\n                                  evhtp_header_new(\"Content-Range\", con_range,\n                                                   0, 1));\n        g_free (con_range);\n        evhtp_send_reply (req, EVHTP_RES_RANGENOTSC);\n        return 0;\n    }\n\n    evhtp_headers_add_header (req->headers_out,\n                              evhtp_header_new (\"Accept-Ranges\", \"bytes\", 0, 0));\n\n    char *content_type = NULL;\n    char *type = parse_content_type (filename);\n    if (type != NULL) {\n        if (strstr(type, \"text\")) {\n            content_type = g_strjoin(\"; \", type, \"charset=gbk\", NULL);\n        } else {\n            content_type = g_strdup (type);\n        }\n\n        if (g_strcmp0 (type, \"image/svg+xml\") == 0) {\n            evhtp_headers_add_header(req->headers_out,\n                                     evhtp_header_new(\"Content-Security-Policy\",\n                                                      policy, 1, 1));\n        }\n    } else {\n        content_type = g_strdup (\"application/octet-stream\");\n    }\n\n    evhtp_headers_add_header (req->headers_out,\n                              evhtp_header_new (\"Content-Type\", content_type, 0, 
1));\n    g_free (content_type);\n\n    char *con_len = g_strdup_printf (\"%\"G_GUINT64_FORMAT, end-start+1);\n    evhtp_headers_add_header (req->headers_out,\n                              evhtp_header_new(\"Content-Length\", con_len, 0, 1));\n    g_free (con_len);\n\n    char *con_range = g_strdup_printf (\"%s %\"G_GUINT64_FORMAT\"-%\"G_GUINT64_FORMAT\n                                       \"/%\"G_GUINT64_FORMAT, \"bytes\",\n                                       start, end, file->file_size);\n    evhtp_headers_add_header (req->headers_out,\n                              evhtp_header_new (\"Content-Range\", con_range, 0, 1));\n    g_free (con_range);\n\n    set_resp_disposition (req, operation, filename);\n\n    if (g_strcmp0 (type, \"image/jpg\") != 0) {\n        evhtp_headers_add_header(req->headers_out,\n                                 evhtp_header_new(\"X-Content-Type-Options\", \"nosniff\",\n                                                  1, 1));\n    }\n\n    data = g_new0 (SendFileRangeData, 1);\n    if (!data) {\n        seafile_unref (file);\n        return -1;\n    }\n    data->req = req;\n    data->file = file;\n    data->blk_idx = -1;\n    data->start_off = start;\n    data->range_remain = end-start+1;\n    data->user = g_strdup(user);\n    data->token_type = g_strdup (operation);\n\n    memcpy (data->store_id, repo->store_id, 36);\n    data->repo_version = repo->version;\n\n    /* We need to overwrite evhtp's callback functions to\n     * write file data piece by piece.\n     */\n    struct bufferevent *bev = evhtp_request_get_bev (req);\n    data->saved_read_cb = bev->readcb;\n    data->saved_write_cb = bev->writecb;\n    data->saved_event_cb = bev->errorcb;\n    data->saved_cb_arg = bev->cbarg;\n    bufferevent_setcb (bev,\n                       NULL,\n                       write_file_range_cb,\n                       file_range_event_cb,\n                       data);\n\n\n    /* Block any new request from this connection before finish\n     
* handling this request.\n     */\n    evhtp_request_pause (req);\n\n    /* Kick start data transfer by sending out http headers. */\n    evhtp_send_reply_start(req, EVHTP_RES_PARTIAL);\n\n    return 0;\n}\n\nstatic int\nstart_download_zip_file (evhtp_request_t *req, const char *token,\n                         const char *zipname, char *zipfile,\n                         const char *repo_id, const char *user, const char *token_type)\n{\n    SeafStat st;\n    char file_size[255];\n    char cont_filename[SEAF_PATH_MAX];\n    int zipfd = 0;\n\n    if (seaf_stat(zipfile, &st) < 0) {\n        seaf_warning (\"Failed to stat %s: %s.\\n\", zipfile, strerror(errno));\n        return -1;\n    }\n\n    evhtp_headers_add_header(req->headers_out,\n                             evhtp_header_new(\"Content-Type\", \"application/zip\", 1, 1));\n\n    snprintf (file_size, sizeof(file_size), \"%\"G_GUINT64_FORMAT\"\", st.st_size);\n    evhtp_headers_add_header (req->headers_out,\n            evhtp_header_new(\"Content-Length\", file_size, 1, 1));\n\n    char *zippath = g_strdup_printf(\"%s.zip\", zipname);\n    char *esc_zippath = g_uri_escape_string(zippath, NULL, FALSE);\n\n    snprintf(cont_filename, SEAF_PATH_MAX,\n             \"attachment;filename*=utf-8''%s;filename=\\\"%s\\\"\", esc_zippath, zippath);\n\n    g_free (zippath);\n    g_free (esc_zippath);\n\n    evhtp_headers_add_header(req->headers_out,\n            evhtp_header_new(\"Content-Disposition\", cont_filename, 1, 1));\n\n    zipfd = g_open (zipfile, O_RDONLY | O_BINARY, 0);\n    if (zipfd < 0) {\n        seaf_warning (\"Failed to open zipfile %s: %s.\\n\", zipfile, strerror(errno));\n        return -1;\n    }\n\n    SendDirData *data;\n    data = g_new0 (SendDirData, 1);\n    data->req = req;\n    data->zipfd = zipfd;\n    data->zipfile = zipfile;\n    data->token = g_strdup (token);\n    data->remain = st.st_size;\n    data->total_size = (guint64)st.st_size;\n    data->user = g_strdup (user);\n    data->token_type 
= g_strdup (token_type);\n    snprintf(data->repo_id, sizeof(data->repo_id), \"%s\", repo_id);\n\n    /* We need to overwrite evhtp's callback functions to\n     * write file data piece by piece.\n     */\n    struct bufferevent *bev = evhtp_request_get_bev (req);\n    data->saved_read_cb = bev->readcb;\n    data->saved_write_cb = bev->writecb;\n    data->saved_event_cb = bev->errorcb;\n    data->saved_cb_arg = bev->cbarg;\n    bufferevent_setcb (bev,\n                       NULL,\n                       write_dir_data_cb,\n                       my_dir_event_cb,\n                       data);\n    /* Block any new request from this connection before finish\n     * handling this request.\n     */\n    evhtp_request_pause (req);\n\n    /* Kick start data transfer by sending out http headers. */\n    evhtp_send_reply_start(req, EVHTP_RES_OK);\n\n    return 0;\n}\n\nstatic void\nset_etag (evhtp_request_t *req,\n          const char *file_id)\n{\n    evhtp_kv_t *kv;\n\n    kv = evhtp_kv_new (\"ETag\", file_id, 1, 1);\n    evhtp_kvs_add_kv (req->headers_out, kv);\n}\n\nstatic void\nset_no_cache (evhtp_request_t *req, gboolean private_cache)\n{\n    evhtp_kv_t *kv;\n\n    if (private_cache) {\n        kv = evhtp_kv_new (\"Cache-Control\", \"private, no-cache\", 1, 1);\n    } else {\n        kv = evhtp_kv_new (\"Cache-Control\", \"public, no-cache\", 1, 1);\n    }\n    evhtp_kvs_add_kv (req->headers_out, kv);\n}\n\nstatic gboolean\ncan_use_cached_content (evhtp_request_t *req)\n{\n    if (evhtp_kv_find (req->headers_in, \"If-Modified-Since\") != NULL) {\n        evhtp_send_reply (req, EVHTP_RES_NOTMOD);\n        return TRUE;\n    }\n\n    char http_date[256];\n    evhtp_kv_t *kv;\n    time_t now = time(NULL);\n\n    /* Set Last-Modified header if the client gets this file\n     * for the first time. 
So that the client will set\n     * If-Modified-Since header the next time it gets the same\n     * file.\n     */\n#ifndef WIN32\n    strftime (http_date, sizeof(http_date), \"%a, %d %b %Y %T GMT\",\n              gmtime(&now));\n#else\n    strftime (http_date, sizeof(http_date), \"%a, %d %b %Y %H:%M:%S GMT\",\n              gmtime(&now));\n#endif\n    kv = evhtp_kv_new (\"Last-Modified\", http_date, 1, 1);\n    evhtp_kvs_add_kv (req->headers_out, kv);\n\n    kv = evhtp_kv_new (\"Cache-Control\", \"max-age=3600\", 1, 1);\n    evhtp_kvs_add_kv (req->headers_out, kv);\n\n    return FALSE;\n}\n\nstatic void\naccess_zip_cb (evhtp_request_t *req, void *arg)\n{\n    char *token;\n    SeafileWebAccess *info = NULL;\n    char *info_str = NULL;\n    json_t *info_obj = NULL;\n    json_error_t jerror;\n    char *filename = NULL;\n    char *repo_id = NULL;\n    char *user = NULL;\n    char *zip_file_path;\n    char *token_type = NULL;\n    const char *error = NULL;\n    int error_code;\n\n    char **parts = g_strsplit (req->uri->path->full + 1, \"/\", 0);\n    if (g_strv_length (parts) != 2) {\n        error = \"Invalid URL\\n\";\n        error_code = EVHTP_RES_BADREQ;\n        goto out;\n    }\n\n    token = parts[1];\n    info = seaf_web_at_manager_query_access_token (seaf->web_at_mgr, token);\n    // Here only check token exist, follow will get zip file path, if zip file path exist\n    // then the token is valid, because it pass some validations in zip stage\n    if (!info) {\n        error = \"Access token not found\\n\";\n        error_code = EVHTP_RES_FORBIDDEN;\n        goto out;\n    }\n\n    g_object_get (info, \"obj_id\", &info_str, NULL);\n    if (!info_str) {\n        seaf_warning (\"Invalid obj_id for token: %s.\\n\", token);\n        error = \"Internal server error\\n\";\n        error_code = EVHTP_RES_SERVERR;\n        goto out;\n    }\n\n    info_obj = json_loadb (info_str, strlen(info_str), 0, &jerror);\n    if (!info_obj) {\n        seaf_warning (\"Failed 
to parse obj_id field: %s for token: %s.\\n\", jerror.text, token);\n        error = \"Internal server error\\n\";\n        error_code = EVHTP_RES_SERVERR;\n        goto out;\n    }\n\n    if (json_object_has_member (info_obj, \"dir_name\")) {\n        // Download dir\n        filename = g_strdup (json_object_get_string_member (info_obj, \"dir_name\"));\n    } else if (json_object_has_member (info_obj, \"file_list\")) {\n        // Download multi\n        time_t now = time(NULL);\n        char date_str[11];\n        strftime(date_str, sizeof(date_str), \"%Y-%m-%d\", localtime(&now));\n        filename = g_strconcat (MULTI_DOWNLOAD_FILE_PREFIX, date_str, NULL);\n    } else {\n        seaf_warning (\"No dir_name or file_list in obj_id for token: %s.\\n\", token);\n        error = \"Internal server error\\n\";\n        error_code = EVHTP_RES_SERVERR;\n        goto out;\n    }\n\n    zip_file_path = zip_download_mgr_get_zip_file_path (seaf->zip_download_mgr, token);\n    if (!zip_file_path) {\n        g_object_get (info, \"repo_id\", &repo_id, NULL);\n        seaf_warning (\"Failed to get zip file path for %s in repo %.8s, token:[%s].\\n\",\n                      filename, repo_id, token);\n        error = \"Internal server error\\n\";\n        error_code = EVHTP_RES_SERVERR;\n        goto out;\n    }\n\n    if (can_use_cached_content (req)) {\n        // Clean zip progress related resource\n        zip_download_mgr_del_zip_progress (seaf->zip_download_mgr, token);\n        goto out;\n    }\n\n    g_object_get (info, \"username\", &user, NULL);\n    g_object_get (info, \"repo_id\", &repo_id, NULL);\n    g_object_get (info, \"op\", &token_type, NULL);\n    int ret = start_download_zip_file (req, token, filename, zip_file_path, repo_id, user, token_type);\n    if (ret < 0) {\n        seaf_warning (\"Failed to start download zip file: %s for token: %s\", filename, token);\n        error = \"Internal server error\\n\";\n        error_code = EVHTP_RES_SERVERR;\n    
}\n\nout:\n    g_strfreev (parts);\n    if (info)\n        g_object_unref (info);\n    if (info_str)\n        g_free (info_str);\n    if (info_obj)\n        json_decref (info_obj);\n    if (filename)\n        g_free (filename);\n    if (repo_id)\n        g_free (repo_id);\n    if (user)\n        g_free (user);\n    if (token_type)\n        g_free (token_type);\n\n    if (error) {\n        evbuffer_add_printf(req->buffer_out, \"%s\\n\", error);\n        evhtp_send_reply(req, error_code);\n    }\n}\n\n/*\nstatic void\naccess_zip_link_cb (evhtp_request_t *req, void *arg)\n{\n    char *token;\n    char *user = NULL;\n    char *zip_file_path;\n    char *zip_file_name;\n    const char *repo_id = NULL;\n    const char *task_id = NULL;\n    const char *error = NULL;\n    int error_code;\n    SeafileShareLinkInfo *info = NULL;\n\n    char **parts = g_strsplit (req->uri->path->full + 1, \"/\", 0);\n    if (g_strv_length (parts) != 2) {\n        error = \"Invalid URL\\n\";\n        error_code = EVHTP_RES_BADREQ;\n        goto out;\n    }\n\n    token = parts[1];\n\n    task_id = evhtp_kv_find (req->uri->query, \"task_id\");\n    if (!task_id) {\n        error = \"No task_id\\n\";\n        error_code = EVHTP_RES_BADREQ;\n        goto out;\n    }\n\n    info = http_tx_manager_query_share_link_info (token, \"dir\");\n    if (!info) {\n        error = \"Access token not found\\n\";\n        error_code = EVHTP_RES_FORBIDDEN;\n        goto out;\n    }\n\n    repo_id = seafile_share_link_info_get_repo_id (info);\n    user = seaf_repo_manager_get_repo_owner (seaf->repo_mgr, repo_id);\n\n    zip_file_path = zip_download_mgr_get_zip_file_path (seaf->zip_download_mgr, task_id);\n    if (!zip_file_path) {\n        seaf_warning (\"Failed to get zip file path in repo %.8s, task id:[%s].\\n\", repo_id, task_id);\n        error = \"Internal server error\\n\";\n        error_code = EVHTP_RES_SERVERR;\n        goto out;\n    }\n    zip_file_name = zip_download_mgr_get_zip_file_name 
(seaf->zip_download_mgr, task_id);\n    if (!zip_file_name) {\n        seaf_warning (\"Failed to get zip file name in repo %.8s, task id:[%s].\\n\", repo_id, task_id);\n        error = \"Internal server error\\n\";\n        error_code = EVHTP_RES_SERVERR;\n        goto out;\n    }\n\n    if (can_use_cached_content (req)) {\n        // Clean zip progress related resource\n        zip_download_mgr_del_zip_progress (seaf->zip_download_mgr, task_id);\n        goto out;\n    }\n\n    int ret = start_download_zip_file (req, task_id, zip_file_name, zip_file_path, repo_id, user, \"download-multi-link\");\n    if (ret < 0) {\n        seaf_warning (\"Failed to start download zip file: %s for task: %s\", zip_file_name, task_id);\n        error = \"Internal server error\\n\";\n        error_code = EVHTP_RES_SERVERR;\n    }\n\nout:\n    g_strfreev (parts);\n    if (info)\n        g_object_unref (info);\n    if (user)\n        g_free (user);\n\n    if (error) {\n        evbuffer_add_printf(req->buffer_out, \"%s\\n\", error);\n        evhtp_send_reply(req, error_code);\n    }\n}\n*/\n\nstatic void\naccess_cb(evhtp_request_t *req, void *arg)\n{\n    SeafRepo *repo = NULL;\n    char *error = NULL;\n    char *token = NULL;\n    char *filename = NULL;\n    char *dec_filename = NULL;\n    const char *repo_id = NULL;\n    const char *data = NULL;\n    const char *operation = NULL;\n    const char *user = NULL;\n    const char *byte_ranges = NULL;\n    int error_code = EVHTP_RES_BADREQ;\n\n    SeafileCryptKey *key = NULL;\n    SeafileWebAccess *webaccess = NULL;\n\n    /* Skip the first '/'. 
*/\n    char **parts = g_strsplit (req->uri->path->full + 1, \"/\", 0);\n    if (!parts || g_strv_length (parts) < 3 ||\n        strcmp (parts[0], \"files\") != 0) {\n        error = \"Invalid URL\";\n        goto on_error;\n    }\n\n    token = parts[1];\n    filename = parts[2];\n\n    // The filename is url-encoded.\n    dec_filename = g_uri_unescape_string(filename, NULL);\n\n    webaccess = seaf_web_at_manager_query_access_token (seaf->web_at_mgr, token);\n    if (!webaccess) {\n        error = \"Access token not found\";\n        error_code = EVHTP_RES_FORBIDDEN;\n        goto on_error;\n    }\n\n    repo_id = seafile_web_access_get_repo_id (webaccess);\n    data = seafile_web_access_get_obj_id (webaccess);\n    operation = seafile_web_access_get_op (webaccess);\n    user = seafile_web_access_get_username (webaccess);\n\n    if (strcmp(operation, \"view\") != 0 &&\n        strcmp(operation, \"download\") != 0 &&\n        strcmp(operation, \"download-link\") != 0) {\n        error = \"Operation does not match access token.\";\n        error_code = EVHTP_RES_FORBIDDEN;\n        goto on_error;\n    }\n\n    set_etag (req, data);\n\n    if (can_use_cached_content (req)) {\n        goto success;\n    }\n\n    byte_ranges = evhtp_kv_find (req->headers_in, \"Range\");\n\n    repo = seaf_repo_manager_get_repo(seaf->repo_mgr, repo_id);\n    if (!repo) {\n        error = \"Bad repo id\\n\";\n        goto on_error;\n    }\n\n    if (repo->encrypted) {\n        key = seaf_passwd_manager_get_decrypt_key (seaf->passwd_mgr,\n                                                   repo_id, user);\n        if (!key) {\n            error = \"Repo is encrypted. 
Please provide password to view it.\";\n            goto on_error;\n        }\n    }\n\n    if (!seaf_fs_manager_object_exists (seaf->fs_mgr,\n                                        repo->store_id, repo->version, data)) {\n        error = \"Invalid file id\\n\";\n        goto on_error;\n    }\n\n    if (!repo->encrypted && byte_ranges) {\n        if (do_file_range (req, repo, data, dec_filename, operation, byte_ranges, user) < 0) {\n            error = \"Internal server error\\n\";\n            error_code = EVHTP_RES_SERVERR;\n            goto on_error;\n        }\n    } else if (do_file(req, repo, data, dec_filename, operation, key, user) < 0) {\n        error = \"Internal server error\\n\";\n        error_code = EVHTP_RES_SERVERR;\n        goto on_error;\n    }\n\nsuccess:\n    g_free (dec_filename);\n    g_strfreev (parts);\n    if (repo != NULL)\n        seaf_repo_unref (repo);\n    if (key != NULL)\n        g_object_unref (key);\n    if (webaccess)\n        g_object_unref (webaccess);\n\n    return;\n\non_error:\n    g_free (dec_filename);\n    g_strfreev (parts);\n    if (repo != NULL)\n        seaf_repo_unref (repo);\n    if (key != NULL)\n        g_object_unref (key);\n    if (webaccess != NULL)\n        g_object_unref (webaccess);\n\n    evbuffer_add_printf(req->buffer_out, \"%s\\n\", error);\n    evhtp_send_reply(req, error_code);\n}\n\nstatic void\naccess_v2_cb(evhtp_request_t *req, void *arg)\n{\n    SeafRepo *repo = NULL;\n    char *error_str = NULL;\n    char *err_msg = NULL;\n    char *token = NULL;\n    char *user = NULL;\n    char *dec_path = NULL;\n    char *rpath = NULL;\n    char *filename = NULL;\n    char *file_id = NULL;\n    char *ip_addr = NULL;\n    const char *repo_id = NULL;\n    const char *path = NULL;\n    const char *operation = NULL;\n    const char *byte_ranges = NULL;\n    const char *auth_token = NULL;\n    const char *cookie = NULL;\n    const char *user_agent = NULL;\n    int error_code = EVHTP_RES_BADREQ;\n\n    
SeafileCryptKey *key = NULL;\n    GError *error = NULL;\n\n    /* Skip the first '/'. */\n    char **parts = g_strsplit (req->uri->path->full + 1, \"/\", 4);\n    if (!parts || g_strv_length (parts) < 4 ||\n        strcmp (parts[2], \"files\") != 0) {\n        error_str = \"Invalid URL\\n\";\n        goto out;\n    }\n\n    repo_id = parts[1];\n\n    path = parts[3];\n    if (!path) {\n        error_str = \"No file path\\n\";\n        goto out;\n    }\n    dec_path = g_uri_unescape_string(path, NULL);\n    rpath = format_dir_path (dec_path);\n    filename = g_path_get_basename (rpath);\n\n    operation = evhtp_kv_find (req->uri->query, \"op\");\n    if (!operation) {\n        error_str = \"No operation\\n\";\n        goto out;\n    }\n    if (strcmp(operation, \"view\") != 0 &&\n        strcmp(operation, \"download\") != 0) {\n        error_str = \"Operation is neither view or download\\n\";\n        goto out;\n    }\n\n\n    auth_token = evhtp_kv_find (req->headers_in, \"Authorization\");\n    token = seaf_parse_auth_token (auth_token);\n    cookie = evhtp_kv_find (req->headers_in, \"Cookie\");\n    ip_addr = get_client_ip_addr (req);\n    user_agent = evhtp_header_find (req->headers_in, \"User-Agent\");\n    if (!token && !cookie) {\n        error_str = \"Both token and cookie are not set\\n\";\n        goto out;\n    }\n    int status = HTTP_OK;\n    if (http_tx_manager_check_file_access (repo_id, token, cookie, dec_path, \"download\", ip_addr, user_agent, &user, &status, &err_msg) < 0) {\n        if (status != HTTP_OK) {\n            error_str = err_msg;\n            error_code = status;\n        } else {\n            error_str = \"Internal server error\\n\";\n            error_code = EVHTP_RES_SERVERR;\n        }\n        goto out;\n    }\n\n    repo = seaf_repo_manager_get_repo(seaf->repo_mgr, repo_id);\n    if (!repo) {\n        error_str = \"Bad repo id\\n\";\n        goto out;\n    }\n\n    file_id = seaf_fs_manager_get_seafile_id_by_path (seaf->fs_mgr, 
repo->store_id, repo->version, repo->root_id, rpath, &error);\n    if (!file_id) {\n        error_str = \"Invalid file_path\\n\";\n        if (error)\n            g_clear_error(&error);\n        goto out;\n    }\n\n    const char *etag = evhtp_kv_find (req->headers_in, \"If-None-Match\");\n    if (g_strcmp0 (etag, file_id) == 0) {\n        evhtp_send_reply (req, EVHTP_RES_NOTMOD);\n        error_code = EVHTP_RES_OK;\n        goto out;\n    }\n    set_etag (req, file_id);\n    set_no_cache (req, TRUE);\n\n    byte_ranges = evhtp_kv_find (req->headers_in, \"Range\");\n\n    if (repo->encrypted) {\n        key = seaf_passwd_manager_get_decrypt_key (seaf->passwd_mgr,\n                                                   repo_id, user);\n        if (!key) {\n            error_str = \"Repo is encrypted. Please provide password to view it.\";\n            goto out;\n        }\n    }\n\n    if (!seaf_fs_manager_object_exists (seaf->fs_mgr,\n                                        repo->store_id, repo->version, file_id)) {\n        error_str = \"Invalid file id\\n\";\n        goto out;\n    }\n\n    if (!repo->encrypted && byte_ranges) {\n        if (do_file_range (req, repo, file_id, filename, operation, byte_ranges, user) < 0) {\n            error_str = \"Internal server error\\n\";\n            error_code = EVHTP_RES_SERVERR;\n            goto out;\n        }\n    } else if (do_file(req, repo, file_id, filename, operation, key, user) < 0) {\n        error_str = \"Internal server error\\n\";\n        error_code = EVHTP_RES_SERVERR;\n        goto out;\n    }\n\n    error_code = EVHTP_RES_OK;\n\nout:\n    g_strfreev (parts);\n    g_free (token);\n    g_free (user);\n    g_free (dec_path);\n    g_free (rpath);\n    g_free (filename);\n    g_free (file_id);\n    g_free (ip_addr);\n    if (repo != NULL)\n        seaf_repo_unref (repo);\n    if (key != NULL)\n        g_object_unref (key);\n\n    if (error_code != EVHTP_RES_OK) {\n        evbuffer_add_printf(req->buffer_out, 
\"%s\\n\", error_str);\n        evhtp_send_reply(req, error_code);\n    }\n    g_free (err_msg);\n}\n\nstatic int\ndo_block(evhtp_request_t *req, SeafRepo *repo, const char *user, const char *file_id,\n         const char *blk_id)\n{\n    Seafile *file;\n    uint32_t bsize;\n    gboolean found = FALSE;\n    int i;\n    char blk_size[255];\n    char cont_filename[SEAF_PATH_MAX];\n    SendBlockData *data;\n\n    file = seaf_fs_manager_get_seafile(seaf->fs_mgr,\n                                       repo->store_id, repo->version, file_id);\n    if (file == NULL)\n        return -1;\n\n    for (i = 0; i < file->n_blocks; i++) {\n        if (memcmp(file->blk_sha1s[i], blk_id, 40) == 0) {\n            BlockMetadata *bm = seaf_block_manager_stat_block (seaf->block_mgr,\n                                                               repo->store_id,\n                                                               repo->version,\n                                                               blk_id);\n            if (bm && bm->size >= 0) {\n                bsize = bm->size;\n                found = TRUE;\n            }\n            g_free (bm);\n            break;\n        }\n    }\n\n    seafile_unref (file);\n\n    /* block not found. 
*/\n    if (!found) {\n        evhtp_send_reply (req, EVHTP_RES_BADREQ);\n        return 0;\n    }\n    evhtp_headers_add_header(req->headers_out,\n                             evhtp_header_new(\"Access-Control-Allow-Origin\",\n                                              \"*\", 1, 1));\n\n    if (test_firefox (req)) {\n        snprintf(cont_filename, SEAF_PATH_MAX,\n                 \"attachment;filename*=\\\"utf-8\\' \\'%s\\\"\", blk_id);\n    } else {\n        snprintf(cont_filename, SEAF_PATH_MAX,\n                 \"attachment;filename=\\\"%s\\\"\", blk_id);\n    }\n    evhtp_headers_add_header(req->headers_out,\n                             evhtp_header_new(\"Content-Disposition\", cont_filename,\n                                              1, 1));\n\n    snprintf(blk_size, sizeof(blk_size), \"%\"G_GUINT32_FORMAT\"\", bsize);\n    evhtp_headers_add_header (req->headers_out,\n                              evhtp_header_new(\"Content-Length\", blk_size, 1, 1));\n\n    data = g_new0 (SendBlockData, 1);\n    data->req = req;\n    data->block_id = g_strdup(blk_id);\n    data->user = g_strdup(user);\n\n    memcpy (data->store_id, repo->store_id, 36);\n    data->repo_version = repo->version;\n\n    /* We need to overwrite evhtp's callback functions to\n     * write file data piece by piece.\n     */\n    struct bufferevent *bev = evhtp_request_get_bev (req);\n    data->saved_read_cb = bev->readcb;\n    data->saved_write_cb = bev->writecb;\n    data->saved_event_cb = bev->errorcb;\n    data->saved_cb_arg = bev->cbarg;\n    data->bsize = bsize;\n    bufferevent_setcb (bev,\n                       NULL,\n                       write_block_data_cb,\n                       my_block_event_cb,\n                       data);\n    /* Block any new request from this connection before finish\n     * handling this request.\n     */\n    evhtp_request_pause (req);\n\n    /* Kick start data transfer by sending out http headers. 
*/\n    evhtp_send_reply_start(req, EVHTP_RES_OK);\n\n    return 0;\n}\n\nstatic void\naccess_blks_cb(evhtp_request_t *req, void *arg)\n{\n    SeafRepo *repo = NULL;\n    char *error = NULL;\n    char *token = NULL;\n    char *blkid = NULL;\n    const char *repo_id = NULL;\n    const char *id = NULL;\n    const char *operation = NULL;\n    const char *user = NULL;\n    int error_code = EVHTP_RES_BADREQ;\n\n    char *repo_role = NULL;\n    SeafileWebAccess *webaccess = NULL;\n\n    /* Skip the first '/'. */\n    char **parts = g_strsplit (req->uri->path->full + 1, \"/\", 0);\n    if (!parts || g_strv_length (parts) < 3 ||\n        strcmp (parts[0], \"blks\") != 0) {\n        error = \"Invalid URL\";\n        goto on_error;\n    }\n\n    token = parts[1];\n    blkid = parts[2];\n\n    webaccess = seaf_web_at_manager_query_access_token (seaf->web_at_mgr, token);\n    if (!webaccess) {\n        error = \"Access token not found\";\n        error_code = EVHTP_RES_FORBIDDEN;\n        goto on_error;\n    }\n\n    if (can_use_cached_content (req)) {\n        goto success;\n    }\n\n    repo_id = seafile_web_access_get_repo_id (webaccess);\n    id = seafile_web_access_get_obj_id (webaccess);\n    operation = seafile_web_access_get_op (webaccess);\n    user = seafile_web_access_get_username (webaccess);\n\n    repo = seaf_repo_manager_get_repo(seaf->repo_mgr, repo_id);\n    if (!repo) {\n        error = \"Bad repo id\\n\";\n        goto on_error;\n    }\n\n    if (!seaf_fs_manager_object_exists (seaf->fs_mgr,\n                                        repo->store_id, repo->version, id)) {\n        error = \"Invalid file id\\n\";\n        goto on_error;\n    }\n\n    if (strcmp(operation, \"downloadblks\") == 0) {\n        if (do_block(req, repo, user, id, blkid) < 0) {\n            seaf_warning (\"Failed to download blocks for token: %s\\n\", token);\n            error_code = EVHTP_RES_SERVERR;\n            goto on_error;\n        }\n    }\n\nsuccess:\n    g_strfreev (parts);\n 
   if (repo != NULL)\n        seaf_repo_unref (repo);\n    g_free (repo_role);\n    g_object_unref (webaccess);\n\n    return;\n\non_error:\n    g_strfreev (parts);\n    if (repo != NULL)\n        seaf_repo_unref (repo);\n    g_free (repo_role);\n    if (webaccess != NULL)\n        g_object_unref (webaccess);\n\n    evbuffer_add_printf(req->buffer_out, \"%s\\n\", error);\n    evhtp_send_reply(req, error_code);\n}\n\nstatic void\naccess_link_cb(evhtp_request_t *req, void *arg)\n{\n    SeafRepo *repo = NULL;\n    char *error_str = NULL;\n    char *token = NULL;\n    char *rpath = NULL;\n    char *filename = NULL;\n    char *file_id = NULL;\n    char *user = NULL;\n    char *norm_file_path = NULL;\n    const char *repo_id = NULL;\n    const char *file_path = NULL;\n    const char *share_type = NULL;\n    const char *byte_ranges = NULL;\n    const char *operation = NULL;\n    int error_code = EVHTP_RES_BADREQ;\n\n    SeafileCryptKey *key = NULL;\n    SeafileShareLinkInfo *info = NULL;\n    GError *error = NULL;\n\n    if (!seaf->seahub_pk) {\n        seaf_warning (\"No seahub private key is configured.\\n\");\n        evhtp_send_reply(req, EVHTP_RES_NOTFOUND);\n        return;\n    }\n\n    /* Skip the first '/'. 
*/\n    char **parts = g_strsplit (req->uri->path->full + 1, \"/\", 0);\n    if (!parts || g_strv_length (parts) < 2 ||\n        strcmp (parts[0], \"f\") != 0) {\n        error_str = \"Invalid URL\\n\";\n        goto out;\n    }\n\n    token = parts[1];\n\n    operation = evhtp_kv_find (req->uri->query, \"op\");\n    if (g_strcmp0 (operation, \"view\") != 0) {\n        operation = \"download-link\";\n    }\n\n    char *ip_addr = get_client_ip_addr (req);\n    const char *user_agent = evhtp_header_find (req->headers_in, \"User-Agent\");\n\n    const char *cookie = evhtp_kv_find (req->headers_in, \"Cookie\");\n    int status = HTTP_OK;\n    char *err_msg = NULL;\n    info = http_tx_manager_query_share_link_info (token, cookie, \"file\", ip_addr, user_agent, &status, &err_msg);\n    if (!info) {\n        g_strfreev (parts);\n        if (status != HTTP_OK) {\n            evbuffer_add_printf(req->buffer_out, \"%s\\n\", err_msg);\n            evhtp_send_reply(req, status);\n        } else {\n            error_str = \"Internal server error\\n\";\n            error_code = EVHTP_RES_SERVERR;\n            evbuffer_add_printf(req->buffer_out, \"%s\\n\", error_str);\n            evhtp_send_reply(req, error_code);\n        }\n        g_free (ip_addr);\n        g_free (err_msg);\n        return;\n    }\n    g_free (ip_addr);\n\n    repo_id = seafile_share_link_info_get_repo_id (info);\n    file_path = seafile_share_link_info_get_file_path (info);\n    if (!file_path) {\n        error_str = \"Internal server error\\n\";\n        error_code = EVHTP_RES_SERVERR;\n        seaf_warning (\"Failed to get file_path by token %s\\n\", token);\n        goto out;\n    }\n    share_type = seafile_share_link_info_get_share_type (info);\n    if (g_strcmp0 (share_type, \"f\") != 0) {\n        error_str = \"Link type mismatch\";\n        goto out;\n    }\n\n    norm_file_path = normalize_utf8_path(file_path);\n    rpath = format_dir_path (norm_file_path);\n    filename = g_path_get_basename 
(rpath);\n\n    repo = seaf_repo_manager_get_repo(seaf->repo_mgr, repo_id);\n    if (!repo) {\n        error_str = \"Bad repo id\\n\";\n        goto out;\n    }\n    user = seaf_repo_manager_get_repo_owner (seaf->repo_mgr, repo_id);\n\n    file_id = seaf_fs_manager_get_seafile_id_by_path (seaf->fs_mgr, repo->store_id, repo->version, repo->root_id, rpath, &error);\n    if (!file_id) {\n        error_str = \"Invalid file_path\\n\";\n        if (error)\n            g_clear_error(&error);\n        goto out;\n    }\n\n    const char *etag = evhtp_kv_find (req->headers_in, \"If-None-Match\");\n    if (g_strcmp0 (etag, file_id) == 0) {\n        evhtp_send_reply (req, EVHTP_RES_NOTMOD);\n        error_code = EVHTP_RES_OK;\n        goto out;\n    }\n    set_etag (req, file_id);\n    set_no_cache (req, FALSE);\n\n    byte_ranges = evhtp_kv_find (req->headers_in, \"Range\");\n\n    if (repo->encrypted) {\n        key = seaf_passwd_manager_get_decrypt_key (seaf->passwd_mgr,\n                                                   repo_id, user);\n        if (!key) {\n            error_str = \"Repo is encrypted. 
Please provide password to view it.\";\n            goto out;\n        }\n    }\n\n    if (!seaf_fs_manager_object_exists (seaf->fs_mgr,\n                                        repo->store_id, repo->version, file_id)) {\n        error_str = \"Invalid file id\\n\";\n        goto out;\n    }\n\n    if (!repo->encrypted && byte_ranges) {\n        if (do_file_range (req, repo, file_id, filename, operation, byte_ranges, user) < 0) {\n            error_str = \"Internal server error\\n\";\n            error_code = EVHTP_RES_SERVERR;\n            goto out;\n        }\n    } else if (do_file(req, repo, file_id, filename, operation, key, user) < 0) {\n        error_str = \"Internal server error\\n\";\n        error_code = EVHTP_RES_SERVERR;\n        goto out;\n    }\n\n    error_code = EVHTP_RES_OK;\n\nout:\n    g_strfreev (parts);\n    g_free (user);\n    g_free (norm_file_path);\n    g_free (rpath);\n    g_free (filename);\n    g_free (file_id);\n    if (repo != NULL)\n        seaf_repo_unref (repo);\n    if (key != NULL)\n        g_object_unref (key);\n    if (info != NULL)\n        g_object_unref (info);\n\n    if (error_code != EVHTP_RES_OK) {\n        evbuffer_add_printf(req->buffer_out, \"%s\\n\", error_str);\n        evhtp_send_reply(req, error_code);\n    }\n}\n\n/*\nstatic GList *\njson_to_dirent_list (SeafRepo *repo, const char *parent_dir, const char *dirents)\n{\n    json_t *array;\n    json_error_t jerror;\n    int i;\n    int len;\n    const char *tmp_file_name;\n    char *file_name = NULL;\n    GList *dirent_list = NULL, *p = NULL;\n    SeafDir *dir;\n    SeafDirent *dirent;\n    GError *error = NULL;\n\n    array = json_loadb (dirents, strlen(dirents), 0, &jerror);\n    if (!array) {\n        seaf_warning (\"Failed to parse download data: %s.\\n\", jerror.text);\n        return NULL;\n    }\n    len = json_array_size (array);\n    if (len == 0) {\n        seaf_warning (\"Invalid download data, miss download file name.\\n\");\n        json_decref (array);\n  
      return NULL;\n    }\n\n    dir = seaf_fs_manager_get_seafdir_by_path (seaf->fs_mgr, repo->store_id,\n                                               repo->version, repo->root_id, parent_dir, &error);\n    if (!dir) {\n        if (error) {\n            seaf_warning (\"Failed to get dir %s repo %.8s: %s.\\n\",\n                          parent_dir, repo->store_id, error->message);\n            g_clear_error(&error);\n        } else {\n            seaf_warning (\"dir %s doesn't exist in repo %.8s.\\n\",\n                          parent_dir, repo->store_id);\n        }\n        json_decref (array);\n        return NULL;\n    }\n\n    GHashTable *dirent_hash = g_hash_table_new(g_str_hash, g_str_equal);\n    for (p = dir->entries; p; p = p->next) {\n        SeafDirent *d = p->data;\n        g_hash_table_insert(dirent_hash, d->name, d);\n    }\n\n    for (i = 0; i < len; i++) {\n        tmp_file_name = json_string_value (json_array_get (array, i));\n        file_name = normalize_utf8_path(tmp_file_name);\n        if (strcmp (file_name, \"\") == 0 || strchr (file_name, '/') != NULL) {\n            seaf_warning (\"Invalid download file name: %s.\\n\", file_name);\n            if (dirent_list) {\n                g_list_free_full (dirent_list, (GDestroyNotify)seaf_dirent_free);\n                dirent_list = NULL;\n            }\n            g_free (file_name);\n            break;\n        }\n\n        dirent = g_hash_table_lookup (dirent_hash, file_name);\n        if (!dirent) {\n            seaf_warning (\"Failed to get dirent for %s in dir %s in repo %.8s.\\n\",\n                           file_name, parent_dir, repo->store_id);\n            if (dirent_list) {\n                g_list_free_full (dirent_list, (GDestroyNotify)seaf_dirent_free);\n                dirent_list = NULL;\n            }\n            g_free (file_name);\n            break;\n        }\n\n        dirent_list = g_list_prepend (dirent_list, seaf_dirent_dup(dirent));\n        g_free (file_name);\n    
}\n\n    g_hash_table_unref(dirent_hash);\n    json_decref (array);\n    seaf_dir_free (dir);\n    return dirent_list;\n}\n\n// application/x-www-form-urlencoded\n// parent_dir=/sub&dirents=[a.md, suba]\nstatic char *\nget_form_field (const char *body_str, const char *field_name)\n{\n    char * value = NULL;\n    char * result = NULL;\n    char * start = strstr(body_str, field_name);\n    // find pos of start\n    if (start) {\n        // skip field and '='\n        start += strlen(field_name) + 1;\n\n        // find pos of '&'\n        char * end = strchr(start, '&');\n        if (end == NULL) {\n            end = start + strlen(start);\n        }\n\n        value = g_strndup(start, end - start);\n    }\n    if (!value) {\n        return NULL;\n    }\n    result = g_uri_unescape_string (value, NULL);\n    g_free (value);\n    return result;\n}\n*/\n\n/*\nstatic void\naccess_dir_link_cb(evhtp_request_t *req, void *arg)\n{\n    SeafRepo *repo = NULL;\n    char *error_str = NULL;\n    char *token = NULL;\n    char *r_parent_dir = NULL;\n    char *fullpath = NULL;\n    char *file_id = NULL;\n    char *filename = NULL;\n    char *norm_parent_dir = NULL;\n    char *norm_path = NULL;\n    char *user = NULL;\n    char *tmp_parent_dir = NULL;\n    char *dirents = NULL;\n    const char *repo_id = NULL;\n    const char *parent_dir = NULL;\n    const char *path= NULL;\n    const char *byte_ranges = NULL;\n    int error_code = EVHTP_RES_BADREQ;\n\n    SeafileCryptKey *key = NULL;\n    SeafileShareLinkInfo *info = NULL;\n    GError *error = NULL;\n\n    if (!seaf->seahub_pk) {\n        seaf_warning (\"No seahub private key is configured.\\n\");\n        evhtp_send_reply(req, EVHTP_RES_NOTFOUND);\n        return;\n    }\n\n    // Skip the first '/'.\n    char **parts = g_strsplit (req->uri->path->full + 1, \"/\", 0);\n    if (!parts || g_strv_length (parts) < 2 ||\n        strcmp (parts[0], \"d\") != 0) {\n        error_str = \"Invalid URL\\n\";\n        goto on_error;\n    
}\n\n    token = parts[1];\n\n    if (g_strv_length (parts) >= 4) {\n        if (strcmp (parts[2], \"zip-task\") != 0) {\n            error_str = \"Invalid URL\\n\";\n            goto on_error;\n        }\n        char *task_id = parts[3];\n        char *progress = zip_download_mgr_query_zip_progress (seaf->zip_download_mgr, task_id, NULL);\n        if (!progress) {\n            error_str = \"No zip progress\\n\";\n            goto on_error;\n        }\n        evbuffer_add_printf (req->buffer_out, \"%s\", progress);\n        evhtp_headers_add_header (\n            req->headers_out,\n            evhtp_header_new(\"Content-Type\", \"application/json; charset=utf-8\", 1, 1));\n        evhtp_send_reply (req, EVHTP_RES_OK);\n        g_free (progress);\n        goto success;\n    }\n\n    info = http_tx_manager_query_share_link_info (token, \"dir\");\n    if (!info) {\n        error_str = \"Link token not found\\n\";\n        error_code = EVHTP_RES_FORBIDDEN;\n        goto on_error;\n    }\n\n    repo_id = seafile_share_link_info_get_repo_id (info);\n\n    repo = seaf_repo_manager_get_repo(seaf->repo_mgr, repo_id);\n    if (!repo) {\n        error_str = \"Bad repo id\\n\";\n        goto on_error;\n    }\n    user = seaf_repo_manager_get_repo_owner (seaf->repo_mgr, repo_id);\n\n    path = evhtp_kv_find (req->uri->query, \"p\");\n    if (!path) {\n        int len = evbuffer_get_length (req->buffer_in);\n        if (len <= 0) {\n            error_str = \"Invalid request body\\n\";\n            goto on_error;\n        }\n        char *body = g_new0 (char, len);\n        evbuffer_remove(req->buffer_in, body, len);\n        tmp_parent_dir = get_form_field (body, \"parent_dir\");\n        if (!tmp_parent_dir) {\n            g_free (body);\n            error_str = \"Invalid parent_dir\\n\";\n            goto on_error;\n        }\n\n        dirents = get_form_field (body, \"dirents\");\n        if (!dirents) {\n            g_free (body);\n            g_free (tmp_parent_dir);\n   
         error_str = \"Invalid dirents\\n\";\n            goto on_error;\n        }\n        g_free (body);\n\n        norm_parent_dir = normalize_utf8_path (tmp_parent_dir);\n        r_parent_dir = format_dir_path (norm_parent_dir);\n        GList *dirent_list = json_to_dirent_list (repo, r_parent_dir, dirents);\n        if (!dirent_list) {\n            error_str = \"Invalid dirents\\n\";\n            goto on_error;\n        }\n\n        char *task_id = NULL;\n        if (g_list_length(dirent_list) == 1) {\n            task_id = zip_download_mgr_start_zip_task_v2 (seaf->zip_download_mgr, repo_id, \"download-dir-link\", user, dirent_list);\n        } else {\n            task_id = zip_download_mgr_start_zip_task_v2 (seaf->zip_download_mgr, repo_id, \"download-multi-link\", user, dirent_list);\n        }\n        if (!task_id) {\n            g_list_free_full (dirent_list, (GDestroyNotify)seaf_dirent_free);\n            error_str = \"Internal server error\\n\";\n            error_code = EVHTP_RES_SERVERR;\n            goto on_error;\n        }\n        evbuffer_add_printf (req->buffer_out, \"{\\\"task_id\\\": \\\"%s\\\"}\", task_id);\n        evhtp_headers_add_header (\n            req->headers_out,\n            evhtp_header_new(\"Content-Type\", \"application/json; charset=utf-8\", 1, 1));\n        evhtp_send_reply (req, EVHTP_RES_OK);\n        g_free (task_id);\n        goto success;\n    }\n\n    if (can_use_cached_content (req)) {\n        goto success;\n    }\n\n    parent_dir = seafile_share_link_info_get_parent_dir (info);\n    norm_parent_dir = normalize_utf8_path (parent_dir);\n    norm_path = normalize_utf8_path (path);\n    r_parent_dir = format_dir_path (norm_parent_dir);\n    fullpath = g_build_filename(r_parent_dir, norm_path, NULL);\n    filename = g_path_get_basename (fullpath);\n\n    file_id = seaf_fs_manager_get_seafile_id_by_path (seaf->fs_mgr, repo->store_id, repo->version, repo->root_id, fullpath, &error);\n    if (!file_id) {\n        error_str 
= \"Invalid file_path\\n\";\n        if (error)\n            g_clear_error(&error);\n        goto on_error;\n    }\n    set_etag (req, file_id);\n\n    byte_ranges = evhtp_kv_find (req->headers_in, \"Range\");\n\n    if (repo->encrypted) {\n        key = seaf_passwd_manager_get_decrypt_key (seaf->passwd_mgr,\n                                                   repo_id, user);\n        if (!key) {\n            error_str = \"Repo is encrypted. Please provide password to view it.\";\n            goto on_error;\n        }\n    }\n\n    if (!seaf_fs_manager_object_exists (seaf->fs_mgr,\n                                        repo->store_id, repo->version, file_id)) {\n        error_str = \"Invalid file id\\n\";\n        goto on_error;\n    }\n\n    if (!repo->encrypted && byte_ranges) {\n        if (do_file_range (req, repo, file_id, filename, \"download-link\", byte_ranges, user) < 0) {\n            error_str = \"Internal server error\\n\";\n            error_code = EVHTP_RES_SERVERR;\n            goto on_error;\n        }\n    } else if (do_file(req, repo, file_id, filename, \"download-link\", key, user) < 0) {\n        error_str = \"Internal server error\\n\";\n        error_code = EVHTP_RES_SERVERR;\n        goto on_error;\n    }\n\nsuccess:\n    g_strfreev (parts);\n    g_free (tmp_parent_dir);\n    g_free (dirents);\n    g_free (user);\n    g_free (norm_parent_dir);\n    g_free (norm_path);\n    g_free (r_parent_dir);\n    g_free (fullpath);\n    g_free (filename);\n    g_free (file_id);\n    if (repo != NULL)\n        seaf_repo_unref (repo);\n    if (key != NULL)\n        g_object_unref (key);\n    if (info)\n        g_object_unref (info);\n\n    return;\n\non_error:\n    g_strfreev (parts);\n    g_free (tmp_parent_dir);\n    g_free (dirents);\n    g_free (user);\n    g_free (norm_parent_dir);\n    g_free (norm_path);\n    g_free (r_parent_dir);\n    g_free (fullpath);\n    g_free (filename);\n    g_free (file_id);\n    if (repo != NULL)\n        seaf_repo_unref 
(repo);\n    if (key != NULL)\n        g_object_unref (key);\n    if (info != NULL)\n        g_object_unref (info);\n\n    evbuffer_add_printf(req->buffer_out, \"%s\\n\", error_str);\n    evhtp_send_reply(req, error_code);\n}\n*/\n\nstatic evhtp_res\nrequest_finish_cb (evhtp_request_t *req, void *arg)\n{\n    RequestInfo *info = arg;\n    struct timeval end, intv;\n\n    seaf_metric_manager_in_flight_request_dec (seaf->metric_mgr);\n\n    if (!info)\n        return EVHTP_RES_OK;\n\n    g_free (info->url_path);\n    g_free (info);\n    return EVHTP_RES_OK;\n}\n\nstatic evhtp_res\naccess_headers_cb (evhtp_request_t *req, evhtp_headers_t *hdr, void *arg)\n{\n    RequestInfo *info = NULL;\n    info = g_new0 (RequestInfo, 1);\n    info->url_path = g_strdup (req->uri->path->full);\n\n    gettimeofday (&info->start, NULL);\n\n    seaf_metric_manager_in_flight_request_inc (seaf->metric_mgr);\n    evhtp_set_hook (&req->hooks, evhtp_hook_on_request_fini, request_finish_cb, info);\n    req->cbarg = info;\n\n    return EVHTP_RES_OK;\n}\n\nint\naccess_file_init (evhtp_t *htp)\n{\n    evhtp_callback_t *cb;\n\n    cb = evhtp_set_regex_cb (htp, \"^/files/.*\", access_cb, NULL);\n    evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, access_headers_cb, NULL);\n\n    cb = evhtp_set_regex_cb (htp, \"^/blks/.*\", access_blks_cb, NULL);\n    evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, access_headers_cb, NULL);\n\n    cb = evhtp_set_regex_cb (htp, \"^/zip/.*\", access_zip_cb, NULL);\n    evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, access_headers_cb, NULL);\n\n    cb = evhtp_set_regex_cb (htp, \"^/f/.*\", access_link_cb, NULL);\n    evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, access_headers_cb, NULL);\n    //evhtp_set_regex_cb (htp, \"^/d/.*\", access_dir_link_cb, NULL);\n    cb = evhtp_set_regex_cb (htp, \"^/repos/[\\\\da-z]{8}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{12}/files/.*\", access_v2_cb, NULL);\n    evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, 
access_headers_cb, NULL);\n\n    return 0;\n}\n#endif\n"
  },
  {
    "path": "server/access-file.h",
    "content": "#ifndef ACCESS_FILE_H\n#define ACCESS_FILE_H\n\n#ifdef HAVE_EVHTP\nint\naccess_file_init (evhtp_t *htp);\n#endif\n\n#endif\n"
  },
  {
    "path": "server/change-set.c",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#include \"common.h\"\n\n#include \"seafile-session.h\"\n\n#include \"utils.h\"\n#include \"log.h\"\n\n#include \"change-set.h\"\n\nstruct _ChangeSetDir {\n    int version;\n    char dir_id[41];\n    /* A hash table of dirents for fast lookup and insertion. */\n    GHashTable *dents;\n\n};\ntypedef struct _ChangeSetDir ChangeSetDir;\n\nstruct _ChangeSetDirent {\n    guint32 mode;\n    char id[41];\n    char *name;\n    gint64 mtime;\n    char *modifier;\n    gint64 size;\n    /* Only used for directory. Most of time this is NULL\n     * unless we change the subdir too.\n     */\n    ChangeSetDir *subdir;\n};\ntypedef struct _ChangeSetDirent ChangeSetDirent;\n\n/* Change set dirent. */\n\nstatic ChangeSetDirent *\nchangeset_dirent_new (const char *id, guint32 mode, const char *name,\n                      gint64 mtime, const char *modifier, gint64 size)\n{\n    ChangeSetDirent *dent = g_new0 (ChangeSetDirent, 1);\n\n    dent->mode = mode;\n    memcpy (dent->id, id, 40);\n    dent->name = g_strdup(name);\n    dent->mtime = mtime;\n    dent->modifier = g_strdup(modifier);\n    dent->size = size;\n\n    return dent;    \n}\n\nstatic ChangeSetDirent *\nseaf_dirent_to_changeset_dirent (SeafDirent *seaf_dent)\n{\n    return changeset_dirent_new (seaf_dent->id, seaf_dent->mode, seaf_dent->name,\n                                 seaf_dent->mtime, seaf_dent->modifier, seaf_dent->size);\n}\n\nstatic SeafDirent *\nchangeset_dirent_to_seaf_dirent (int version, ChangeSetDirent *dent)\n{\n    return seaf_dirent_new (version, dent->id, dent->mode, dent->name,\n                            dent->mtime, dent->modifier, dent->size);\n}\n\nstatic void\nchangeset_dir_free (ChangeSetDir *dir);\n\nstatic void\nchangeset_dirent_free (ChangeSetDirent *dent)\n{\n    if (!dent)\n        return;\n\n    g_free (dent->name);\n    g_free (dent->modifier);\n    /* Recursively free subdir. 
*/\n    if (dent->subdir)\n        changeset_dir_free (dent->subdir);\n    g_free (dent);\n}\n\n/* Change set dir. */\n\nstatic void\nadd_dent_to_dir (ChangeSetDir *dir, ChangeSetDirent *dent)\n{\n    g_hash_table_insert (dir->dents,\n                         g_strdup(dent->name),\n                         dent);\n}\n\nstatic void\nremove_dent_from_dir (ChangeSetDir *dir, const char *dname)\n{\n    char *key;\n\n    if (g_hash_table_lookup_extended (dir->dents, dname,\n                                      (gpointer*)&key, NULL)) {\n        g_hash_table_steal (dir->dents, dname);\n        g_free (key);\n    }\n}\n\nstatic ChangeSetDir *\nchangeset_dir_new (int version, const char *id, GList *dirents)\n{\n    ChangeSetDir *dir = g_new0 (ChangeSetDir, 1);\n    GList *ptr;\n    SeafDirent *dent;\n    ChangeSetDirent *changeset_dent;\n\n    dir->version = version;\n    if (id)\n        memcpy (dir->dir_id, id, 40);\n    dir->dents = g_hash_table_new_full (g_str_hash, g_str_equal,\n                                        g_free, (GDestroyNotify)changeset_dirent_free);\n    for (ptr = dirents; ptr; ptr = ptr->next) {\n        dent = ptr->data;\n        changeset_dent = seaf_dirent_to_changeset_dirent(dent);\n        add_dent_to_dir (dir, changeset_dent);\n    }\n\n    return dir;\n} \n\nstatic void\nchangeset_dir_free (ChangeSetDir *dir)\n{\n    if (!dir)\n        return;\n    g_hash_table_destroy (dir->dents);\n    g_free (dir);\n}\n\nstatic ChangeSetDir *\nseaf_dir_to_changeset_dir (SeafDir *seaf_dir)\n{\n    return changeset_dir_new (seaf_dir->version, seaf_dir->dir_id, seaf_dir->entries);\n}\n\nstatic gint\ncompare_dents (gconstpointer a, gconstpointer b)\n{\n    const SeafDirent *denta = a, *dentb = b;\n\n    return strcmp(dentb->name, denta->name);\n}\n\nstatic SeafDir *\nchangeset_dir_to_seaf_dir (ChangeSetDir *dir)\n{\n    GList *dents = NULL, *seaf_dents = NULL;\n    GList *ptr;\n    ChangeSetDirent *dent;\n    SeafDirent *seaf_dent;\n    SeafDir *seaf_dir;\n\n  
  dents = g_hash_table_get_values (dir->dents);\n    for (ptr = dents; ptr; ptr = ptr->next) {\n        dent = ptr->data;\n        seaf_dent = changeset_dirent_to_seaf_dirent (dir->version, dent);\n        seaf_dents = g_list_prepend (seaf_dents, seaf_dent);\n    }\n    /* Sort it in descending order. */\n    seaf_dents = g_list_sort (seaf_dents, compare_dents);\n\n    /* seaf_dir_new() computes the dir id. */\n    seaf_dir = seaf_dir_new (NULL, seaf_dents, dir->version);\n\n    g_list_free (dents);\n    return seaf_dir;\n}\n\n/* Change set. */\n\nChangeSet *\nchangeset_new (const char *repo_id, SeafDir *dir)\n{\n    ChangeSetDir *changeset_dir = NULL;\n    ChangeSet *changeset = NULL;\n\n    changeset_dir = seaf_dir_to_changeset_dir (dir);\n    if (!changeset_dir)\n        goto out;\n\n    changeset = g_new0 (ChangeSet, 1);\n    memcpy (changeset->repo_id, repo_id, 36);\n    changeset->tree_root = changeset_dir;\n\nout:\n    return changeset;\n}\n\nvoid\nchangeset_free (ChangeSet *changeset)\n{\n    if (!changeset)\n        return;\n\n    changeset_dir_free (changeset->tree_root);\n    g_free (changeset);\n}\n\nstatic ChangeSetDirent *\ndelete_from_tree (ChangeSet *changeset,\n                  const char *path,\n                  gboolean *parent_empty)\n{\n    char *repo_id = changeset->repo_id;\n    ChangeSetDir *root = changeset->tree_root;\n    char **parts, *dname;\n    int n, i;\n    ChangeSetDir *dir;\n    ChangeSetDirent *dent, *ret = NULL;\n    ChangeSetDirent *parent_dent = NULL;\n    SeafDir *seaf_dir;\n\n    *parent_empty = FALSE;\n\n    parts = g_strsplit (path, \"/\", 0);\n    n = g_strv_length(parts);\n    dir = root;\n    for (i = 0; i < n; i++) {\n        dname = parts[i];\n\n        dent = g_hash_table_lookup (dir->dents, dname);\n        if (!dent)\n            break;\n\n        if (S_ISDIR(dent->mode)) {\n            if (i == (n-1)) {\n                /* Remove from hash table without freeing dent. 
*/\n                remove_dent_from_dir (dir, dname);\n                if (g_hash_table_size (dir->dents) == 0)\n                    *parent_empty = TRUE;\n                ret = dent;\n                // update parent dir mtime when delete dirs locally.\n                if (parent_dent) {\n                    parent_dent->mtime = time (NULL);\n                }\n                break;\n            }\n\n            if (!dent->subdir) {\n                seaf_dir = seaf_fs_manager_get_seafdir(seaf->fs_mgr,\n                                                       repo_id,\n                                                       root->version,\n                                                       dent->id);\n                if (!seaf_dir) {\n                    seaf_warning (\"Failed to load seafdir %s:%s\\n\",\n                                  repo_id, dent->id);\n                    break;\n                }\n                dent->subdir = seaf_dir_to_changeset_dir (seaf_dir);\n                seaf_dir_free (seaf_dir);\n            }\n            dir = dent->subdir;\n            parent_dent = dent;\n        } else if (S_ISREG(dent->mode)) {\n            if (i == (n-1)) {\n                /* Remove from hash table without freeing dent. 
*/\n                remove_dent_from_dir (dir, dname);\n                if (g_hash_table_size (dir->dents) == 0)\n                    *parent_empty = TRUE;\n                ret = dent;\n                // update parent dir mtime when delete files locally.\n                if (parent_dent) {\n                    parent_dent->mtime = time (NULL);\n                }\n                break;\n            }\n        }\n    }\n\n    g_strfreev (parts);\n    return ret;\n}\n\nstatic void\nremove_from_changeset_recursive (ChangeSet *changeset,\n                                 const char *path,\n                                 gboolean remove_parent,\n                                 const char *top_dir,\n                                 int *mode)\n{\n    ChangeSetDirent *dent;\n    gboolean parent_empty = FALSE;\n\n    dent = delete_from_tree (changeset, path, &parent_empty);\n    if (mode && dent)\n        *mode = dent->mode;\n    changeset_dirent_free (dent);\n\n    if (remove_parent && parent_empty) {\n        char *parent = g_strdup(path);\n        char *slash = strrchr (parent, '/');\n        if (slash) {\n            *slash = '\\0';\n            if (strlen(parent) >= strlen(top_dir)) {\n                /* Recursively remove parent dirs. 
*/\n                remove_from_changeset_recursive (changeset,\n                                                 parent,\n                                                 remove_parent,\n                                                 top_dir,\n                                                 mode);\n            }\n        }\n        g_free (parent);\n    }\n}\n\nvoid\nremove_from_changeset (ChangeSet *changeset,\n                       const char *path,\n                       gboolean remove_parent,\n                       const char *top_dir,\n                       int *mode)\n{\n    remove_from_changeset_recursive (changeset, path, remove_parent, top_dir, mode);\n}\n\nstatic char *\ncommit_tree_recursive (const char *repo_id, ChangeSetDir *dir)\n{\n    ChangeSetDirent *dent;\n    GHashTableIter iter;\n    gpointer key, value;\n    char *new_id;\n    SeafDir *seaf_dir;\n    char *ret = NULL;\n\n    g_hash_table_iter_init (&iter, dir->dents);\n    while (g_hash_table_iter_next (&iter, &key, &value)) {\n        dent = value;\n        if (dent->subdir) {\n            new_id = commit_tree_recursive (repo_id, dent->subdir);\n            if (!new_id)\n                return NULL;\n\n            memcpy (dent->id, new_id, 40);\n            g_free (new_id);\n        }\n    }\n\n    seaf_dir = changeset_dir_to_seaf_dir (dir);\n\n    memcpy (dir->dir_id, seaf_dir->dir_id, 40);\n\n    if (!seaf_fs_manager_object_exists (seaf->fs_mgr,\n                                        repo_id, dir->version,\n                                        seaf_dir->dir_id)) {\n        if (seaf_dir_save (seaf->fs_mgr, repo_id, dir->version, seaf_dir) < 0) {\n            seaf_warning (\"Failed to save dir object %s to repo %s.\\n\",\n                          seaf_dir->dir_id, repo_id);\n            goto out;\n        }\n    }\n\n    ret = g_strdup(seaf_dir->dir_id);\n\nout:\n    seaf_dir_free (seaf_dir);\n    return ret;\n}\n\n/*\n * This function does two things:\n * - calculate dir id 
from bottom up;\n * - create and save seaf dir objects.\n * It returns root dir id of the new commit.\n */\nchar *\ncommit_tree_from_changeset (ChangeSet *changeset)\n{\n    char *root_id = commit_tree_recursive (changeset->repo_id,\n                                           changeset->tree_root);\n\n    return root_id;\n}\n"
  },
  {
    "path": "server/change-set.h",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#ifndef SEAF_CHANGE_SET_H\n#define SEAF_CHANGE_SET_H\n\n#include <glib.h>\n#include \"utils.h\"\n\nstruct _ChangeSetDir;\n\nstruct _ChangeSet {\n    char repo_id[37];\n    /* A partial tree for all changed directories. */\n    struct _ChangeSetDir *tree_root;\n};\ntypedef struct _ChangeSet ChangeSet;\n\nChangeSet *\nchangeset_new (const char *repo_id, SeafDir *dir);\n\nvoid\nchangeset_free (ChangeSet *changeset);\n\n/*\n  @remove_parent: remove the parent dir when it becomes empty.\n*/\nvoid\nremove_from_changeset (ChangeSet *changeset,\n                       const char *path,\n                       gboolean remove_parent,\n                       const char *top_dir,\n                       int *mode);\n\nchar *\ncommit_tree_from_changeset (ChangeSet *changeset);\n\n#endif\n"
  },
  {
    "path": "server/copy-mgr.c",
    "content": "#include \"common.h\"\n#include \"log.h\"\n\n#include <pthread.h>\n\n#include \"seafile-session.h\"\n#include \"seafile-object.h\"\n#include \"seafile-error.h\"\n\n#include \"copy-mgr.h\"\n\n#include \"utils.h\"\n\n#include \"log.h\"\n\n#define DEFAULT_MAX_THREADS 5\n\nstruct _SeafCopyManagerPriv {\n    GHashTable *copy_tasks;\n    pthread_mutex_t lock;\n    CcnetJobManager *job_mgr;\n};\n\nstatic void\ncopy_task_free (CopyTask *task)\n{\n    if (!task) return;\n\n    g_free (task->failed_reason);\n    g_free (task);\n}\n\nSeafCopyManager *\nseaf_copy_manager_new (struct _SeafileSession *session)\n{\n    SeafCopyManager *mgr = g_new0 (SeafCopyManager, 1);\n\n    mgr->session = session;\n    mgr->priv = g_new0 (struct _SeafCopyManagerPriv, 1);\n    mgr->priv->copy_tasks = g_hash_table_new_full (g_str_hash, g_str_equal,\n                                                   g_free,\n                                                   (GDestroyNotify)copy_task_free);\n    pthread_mutex_init (&mgr->priv->lock, NULL);\n\n    mgr->max_files = g_key_file_get_int64 (session->config,\n                                           \"web_copy\", \"max_files\", NULL);\n    mgr->max_size = g_key_file_get_int64 (session->config,\n                                          \"web_copy\", \"max_size\", NULL);\n    /* size is given in MB */\n    mgr->max_size <<= 20;\n\n    return mgr;\n}\n\nint\nseaf_copy_manager_start (SeafCopyManager *mgr)\n{\n    mgr->priv->job_mgr = ccnet_job_manager_new (DEFAULT_MAX_THREADS);\n\n    return 1;\n}\n\nSeafileCopyTask *\nseaf_copy_manager_get_task (SeafCopyManager *mgr,\n                            const char *task_id)\n{\n    SeafCopyManagerPriv *priv = mgr->priv;\n    CopyTask *task;\n    SeafileCopyTask *t = NULL;\n\n    pthread_mutex_lock (&priv->lock);\n\n    task = g_hash_table_lookup (priv->copy_tasks, task_id);\n\n    if (task) {\n        t = seafile_copy_task_new ();\n        g_object_set (t, \"done\", task->done, \"total\", 
task->total,\n                      \"canceled\", task->canceled, \"failed\", task->failed,\n                      \"failed_reason\", task->failed_reason, \"successful\", task->successful,\n                      NULL);\n        if (task->canceled || task->failed || task->successful)\n            g_hash_table_remove(priv->copy_tasks, task_id);\n    }\n\n    pthread_mutex_unlock (&priv->lock);\n\n    return t;\n}\n\nstruct CopyThreadData {\n    SeafCopyManager *mgr;\n    char src_repo_id[37];\n    char *src_path;\n    char *src_filename;\n    char dst_repo_id[37];\n    char *dst_path;\n    char *dst_filename;\n    int replace;\n    char *modifier;\n    CopyTask *task;\n    CopyTaskFunc func;\n};\ntypedef struct CopyThreadData CopyThreadData;\n\nstatic void *\ncopy_thread (void *vdata)\n{\n    CopyThreadData *data = vdata;\n\n    data->func (data->src_repo_id, data->src_path, data->src_filename,\n                data->dst_repo_id, data->dst_path, data->dst_filename,\n                data->replace, data->modifier, data->task);\n\n    return vdata;\n}\n\nstatic void\ncopy_done (void *vdata)\n{\n    CopyThreadData *data = vdata;\n\n    g_free (data->src_path);\n    g_free (data->src_filename);\n    g_free (data->dst_path);\n    g_free (data->dst_filename);\n    g_free (data->modifier);\n    g_free (data);\n}\n\nchar *\nseaf_copy_manager_add_task (SeafCopyManager *mgr,\n                            const char *src_repo_id,\n                            const char *src_path,\n                            const char *src_filename,\n                            const char *dst_repo_id,\n                            const char *dst_path,\n                            const char *dst_filename,\n                            int replace,\n                            const char *modifier,\n                            CopyTaskFunc function,\n                            gboolean need_progress)\n{\n    SeafCopyManagerPriv *priv = mgr->priv;\n    char *task_id = NULL;\n    CopyTask *task = 
NULL;\n    struct CopyThreadData *data;\n\n    if (need_progress) {\n        task_id = gen_uuid();\n        task = g_new0 (CopyTask, 1);\n        memcpy (task->task_id, task_id, 36);\n\n        pthread_mutex_lock (&priv->lock);\n        g_hash_table_insert (priv->copy_tasks, g_strdup(task_id), task);\n        pthread_mutex_unlock (&priv->lock);\n    }\n\n    data = g_new0 (CopyThreadData, 1);\n    data->mgr = mgr;\n    memcpy (data->src_repo_id, src_repo_id, 36);\n    data->src_path = g_strdup(src_path);\n    data->src_filename = g_strdup(src_filename);\n    memcpy (data->dst_repo_id, dst_repo_id, 36);\n    data->dst_path = g_strdup(dst_path);\n    data->dst_filename = g_strdup(dst_filename);\n    data->replace = replace;\n    data->modifier = g_strdup(modifier);\n    data->task = task;\n    data->func = function;\n\n    ccnet_job_manager_schedule_job (mgr->priv->job_mgr,\n                                    copy_thread,\n                                    copy_done,\n                                    data);\n    return task_id;\n}\n\nint\nseaf_copy_manager_cancel_task (SeafCopyManager *mgr, const char *task_id)\n{\n    SeafCopyManagerPriv *priv = mgr->priv;\n    CopyTask *task;\n\n    pthread_mutex_lock (&priv->lock);\n\n    task = g_hash_table_lookup (priv->copy_tasks, task_id);\n\n    pthread_mutex_unlock (&priv->lock);\n\n    if (task) {\n        if (task->canceled || task->failed || task->successful)\n            return -1;\n        g_atomic_int_set (&task->canceled, 1);\n    }\n\n    return 0;\n}\n"
  },
  {
    "path": "server/copy-mgr.h",
    "content": "#ifndef COPY_MGR_H\n#define COPY_MGR_H\n\n#include <glib.h>\n\n#define COPY_ERR_INTERNAL  \"Internal error when copy or move\"\n#define COPY_ERR_BAD_ARG  \"Invalid arguments\"\n#define COPY_ERR_TOO_MANY_FILES \"Too many files\"\n#define COPY_ERR_SIZE_TOO_LARGE \"Folder or file size is too large\"\n#define COPY_ERR_QUOTA_IS_FULL  \"Quota is full\"\n\nstruct _SeafileSession;\nstruct _SeafCopyManagerPriv;\nstruct _SeafileCopyTask;\n\nstruct _SeafCopyManager {\n    struct _SeafileSession *session;\n    struct _SeafCopyManagerPriv *priv;\n\n    gint64 max_files;\n    gint64 max_size;\n};\ntypedef struct _SeafCopyManager SeafCopyManager;\ntypedef struct _SeafCopyManagerPriv SeafCopyManagerPriv;\n\nstruct CopyTask {\n    char task_id[37];\n    gint64 done;\n    gint64 total;\n    gint canceled;\n    gboolean failed;\n    char *failed_reason;\n    gboolean successful;\n};\ntypedef struct CopyTask CopyTask;\n\nSeafCopyManager *\nseaf_copy_manager_new (struct _SeafileSession *session);\n\nint\nseaf_copy_manager_start (SeafCopyManager *mgr);\n\ntypedef int (*CopyTaskFunc) (const char *, const char *, const char *,\n                             const char *, const char *, const char *,\n                             int, const char *, CopyTask *);\n\nchar *\nseaf_copy_manager_add_task (SeafCopyManager *mgr,\n                            const char *src_repo_id,\n                            const char *src_path,\n                            const char *src_filename,\n                            const char *dst_repo_id,\n                            const char *dst_path,\n                            const char *dst_filename,\n                            int replace,\n                            const char *modifier,\n                            CopyTaskFunc function,\n                            gboolean need_progress);\n\nstruct _SeafileCopyTask *\nseaf_copy_manager_get_task (SeafCopyManager *mgr,\n                            const char * 
id);\n\nint\nseaf_copy_manager_cancel_task (SeafCopyManager *mgr, const char *task_id);\n\n#endif\n"
  },
  {
    "path": "server/fileserver-config.c",
    "content": "#include \"common.h\"\n\n#include <glib.h>\n\n#include \"fileserver-config.h\"\n\nconst char *OLD_GROUP_NAME = \"httpserver\";\nconst char *GROUP_NAME = \"fileserver\";\n\nstatic const char *\nget_group_name(GKeyFile *config)\n{\n    return g_key_file_has_group (config, GROUP_NAME) ? GROUP_NAME : OLD_GROUP_NAME;\n}\n\nint\nfileserver_config_get_integer(GKeyFile *config, char *key, GError **error)\n{\n    const char *group = get_group_name(config);\n    return g_key_file_get_integer (config, group, key, error);\n}\n\nint\nfileserver_config_get_int64(GKeyFile *config, char *key, GError **error)\n{\n    const char *group = get_group_name(config);\n    return g_key_file_get_int64 (config, group, key, error);\n}\n\nchar *\nfileserver_config_get_string(GKeyFile *config, char *key, GError **error)\n{\n    const char *group = get_group_name(config);\n    return g_key_file_get_string (config, group, key, error);\n}\n\ngboolean\nfileserver_config_get_boolean(GKeyFile *config, char *key, GError **error)\n{\n    const char *group = get_group_name(config);\n    return g_key_file_get_boolean (config, group, key, error);\n}\n"
  },
  {
    "path": "server/fileserver-config.h",
    "content": "#ifndef SEAFILE_FILESERVER_CONFIG_H\n#define SEAFILE_FILESERVER_CONFIG_H\n\nstruct GKeyFile;\n\nint\nfileserver_config_get_integer(GKeyFile *config, char *key, GError **error);\n\nchar *\nfileserver_config_get_string(GKeyFile *config, char *key, GError **error);\n\nint\nfileserver_config_get_int64(GKeyFile *config, char *key, GError **error);\n\ngboolean\nfileserver_config_get_boolean(GKeyFile *config, char *key, GError **error);\n\n#endif // SEAFILE_FILESERVER_CONFIG_H\n"
  },
  {
    "path": "server/gc/Makefile.am",
    "content": "\nAM_CFLAGS = -DPKGDATADIR=\\\"$(pkgdatadir)\\\" \\\n\t-DPACKAGE_DATA_DIR=\\\"\"$(pkgdatadir)\"\\\" \\\n\t-DSEAFILE_SERVER \\\n\t-I$(top_srcdir)/include \\\n\t-I$(top_srcdir)/lib \\\n\t-I$(top_builddir)/lib \\\n\t-I$(top_srcdir)/common \\\n\t@SEARPC_CFLAGS@ \\\n\t@GLIB2_CFLAGS@ \\\n\t@MSVC_CFLAGS@ \\\n\t@MYSQL_CFLAGS@ \\\n\t-Wall\n\nbin_PROGRAMS = seafserv-gc seaf-fsck\n\nnoinst_HEADERS = \\\n\tseafile-session.h \\\n\trepo-mgr.h \\\n\tverify.h \\\n\tfsck.h \\\n\tgc-core.h\n\ncommon_sources = \\\n\tseafile-session.c \\\n\trepo-mgr.c \\\n\t../../common/seaf-db.c \\\n\t../../common/branch-mgr.c \\\n\t../../common/fs-mgr.c \\\n\t../../common/block-mgr.c \\\n\t../../common/block-backend.c \\\n\t../../common/block-backend-fs.c \\\n\t../../common/commit-mgr.c \\\n\t../../common/log.c \\\n\t../../common/seaf-utils.c \\\n\t../../common/obj-store.c \\\n\t../../common/obj-backend-fs.c \\\n\t../../common/seafile-crypt.c \\\n\t../../common/password-hash.c \\\n\t../../common/config-mgr.c\n\nseafserv_gc_SOURCES = \\\n\tseafserv-gc.c \\\n\tverify.c \\\n\tgc-core.c \\\n\t$(common_sources)\n\nseafserv_gc_LDADD = $(top_builddir)/common/cdc/libcdc.la \\\n\t$(top_builddir)/lib/libseafile_common.la \\\n\t@GLIB2_LIBS@ @GOBJECT_LIBS@ @SSL_LIBS@ @LIB_RT@ @LIB_UUID@ -lsqlite3 @LIBEVENT_LIBS@ \\\n\t@SEARPC_LIBS@ @JANSSON_LIBS@ ${LIB_WS32} @ZLIB_LIBS@ \\\n\t@MYSQL_LIBS@ -lsqlite3 @ARGON2_LIBS@\n\nseaf_fsck_SOURCES = \\\n\tseaf-fsck.c \\\n\tfsck.c \\\n\t$(common_sources)\n\nseaf_fsck_LDADD = $(top_builddir)/common/cdc/libcdc.la \\\n\t$(top_builddir)/lib/libseafile_common.la \\\n\t@GLIB2_LIBS@ @GOBJECT_LIBS@ @SSL_LIBS@ @LIB_RT@ @LIB_UUID@ -lsqlite3 @LIBEVENT_LIBS@ \\\n\t@SEARPC_LIBS@ @JANSSON_LIBS@ ${LIB_WS32} @ZLIB_LIBS@ \\\n\t@MYSQL_LIBS@ -lsqlite3 @ARGON2_LIBS@\n"
  },
  {
    "path": "server/gc/fsck.c",
    "content": "#include \"common.h\"\n\n#include <fcntl.h>\n\n#include \"seafile-session.h\"\n#include \"seaf-utils.h\"\n#include \"log.h\"\n#include \"utils.h\"\n\n#include \"fsck.h\"\n\ntypedef struct FsckData {\n    FsckOptions *options;\n    SeafRepo *repo;\n    GHashTable *existing_blocks;\n    GList *repaired_files;\n    GList *repaired_folders;\n    gint64 truncate_time;\n} FsckData;\n\ntypedef struct CheckAndRecoverRepoObj {\n    char *repo_id;\n    FsckOptions *options;\n} CheckAndRecoverRepoObj;\n\ntypedef enum VerifyType {\n    VERIFY_FILE,\n    VERIFY_DIR\n} VerifyType;\n\nstatic gboolean\nfsck_verify_seafobj (const char *store_id,\n                     int version,\n                     const char *obj_id,\n                     gboolean *io_error,\n                     VerifyType type,\n                     gboolean repair)\n{\n    gboolean valid = TRUE;\n\n    valid = seaf_fs_manager_object_exists (seaf->fs_mgr, store_id,\n                                           version, obj_id);\n    if (!valid) {\n        if (type == VERIFY_FILE) {\n            seaf_message (\"File %s is missing.\\n\", obj_id);\n        }  else if (type == VERIFY_DIR) {\n            seaf_message (\"Dir %s is missing.\\n\", obj_id);\n        }\n        return valid;\n    }\n\n    if (type == VERIFY_FILE) {\n        valid = seaf_fs_manager_verify_seafile (seaf->fs_mgr, store_id, version,\n                                                obj_id, TRUE, io_error);\n        if (!valid && !*io_error && repair) {\n            seaf_message (\"File %s is damaged.\\n\", obj_id);\n        }\n    } else if (type == VERIFY_DIR) {\n        valid = seaf_fs_manager_verify_seafdir (seaf->fs_mgr, store_id, version,\n                                                obj_id, TRUE, io_error);\n        if (!valid && !*io_error && repair) {\n            seaf_message (\"Dir %s is damaged.\\n\", obj_id);\n        }\n    }\n\n    return valid;\n}\n\nstatic int\ncheck_blocks (const char *file_id, FsckData 
*fsck_data, gboolean *io_error)\n{\n    Seafile *seafile;\n    int i;\n    char *block_id;\n    int ret = 0;\n    int dummy;\n\n    gboolean ok = TRUE;\n    SeafRepo *repo = fsck_data->repo;\n    const char *store_id = repo->store_id;\n    int version = repo->version;\n\n    seafile = seaf_fs_manager_get_seafile (seaf->fs_mgr, store_id,\n                                           version, file_id);\n    if (!seafile) {\n        seaf_warning (\"Failed to get seafile: %s/%s\\n\", store_id, file_id);\n        return -1;\n    }\n\n    for (i = 0; i < seafile->n_blocks; ++i) {\n        block_id = seafile->blk_sha1s[i];\n\n        if (g_hash_table_lookup (fsck_data->existing_blocks, block_id))\n            continue;\n\n        if (!seaf_block_manager_block_exists (seaf->block_mgr,\n                                              store_id, version,\n                                              block_id)) {\n            seaf_warning (\"Repo[%.8s] block %s:%s is missing.\\n\", repo->id, store_id, block_id);\n            ret = -1;\n            continue;\n        }\n\n        if (fsck_data->options->check_integrity) {\n            // check block integrity, if not remove it\n            ok = seaf_block_manager_verify_block (seaf->block_mgr,\n                                                  store_id, version,\n                                                  block_id, io_error);\n            if (!ok) {\n                if (*io_error) {\n                    if (ret < 0) {\n                        *io_error = FALSE;\n                    }\n                    ret = -1;\n                    break;\n                } else {\n                    if (fsck_data->options->repair) {\n                        seaf_message (\"Repo[%.8s] block %s is damaged, remove it.\\n\", repo->id, block_id);\n                        seaf_block_manager_remove_block (seaf->block_mgr,\n                                                         store_id, version,\n                                             
            block_id);\n                    } else {\n                        seaf_message (\"Repo[%.8s] block %s is damaged.\\n\", repo->id, block_id);\n                    }\n                    ret = -1;\n                }\n            }\n        }\n\n        g_hash_table_insert (fsck_data->existing_blocks, g_strdup(block_id), &dummy);\n    }\n\n    seafile_unref (seafile);\n\n    return ret;\n}\n\ntypedef struct {\n    SeafRepo *repo;\n    const char *file_path;\n    const char *file_id;\n    char *commit_id;\n    gboolean found;\n    gboolean traversed_head;\n    GHashTable *visited_commits;\n    gint64 truncate_time;\n} CheckFileSizeData;\n\nstatic gboolean\nget_file_updated_commit (SeafCommit *commit, void *vdata, gboolean *stop)\n{\n    CheckFileSizeData *data = vdata;\n    SeafRepo *repo = data->repo;\n    int ret;\n\n    if (data->found) {\n        *stop = TRUE;\n        return TRUE;\n    }\n\n    if (g_hash_table_lookup (data->visited_commits, commit->commit_id)) {\n        *stop = TRUE;\n        return TRUE;\n    }\n\n    int dummy;\n    g_hash_table_replace (data->visited_commits,\n                          g_strdup (commit->commit_id), &dummy);\n\n    if (data->truncate_time == 0)\n    {\n        *stop = TRUE;\n    } else if (data->truncate_time > 0 &&\n             (gint64)(commit->ctime) < data->truncate_time &&\n             data->traversed_head)\n    {\n        *stop = TRUE;\n    }\n\n    if (!data->traversed_head)\n        data->traversed_head = TRUE;\n\n    char *file_id;\n    guint32 mode;\n    GError *error = NULL;\n    file_id = seaf_fs_manager_path_to_obj_id (seaf->fs_mgr,\n                                              repo->store_id, repo->version,\n                                              commit->root_id, data->file_path, &mode, &error);\n    if (error) {\n        g_clear_error (&error);\n    }\n\n    // Compare the file_id with the current file.\n    // If the file_id has changed, then the previous commit is the commit where the file 
was modified.\n    if (g_strcmp0 (data->file_id, file_id) != 0) {\n        data->found = TRUE;\n        *stop = TRUE;\n    } else {\n        g_free (data->commit_id);\n        data->commit_id = g_strdup(commit->commit_id);\n    }\n    g_free (file_id);\n\n    return TRUE;\n}\n\nstatic int\ncheck_file_size (FsckData *fsck_data, SeafDirent *dent, const char *path)\n{\n    int ret = 0;\n    SeafRepo *repo = fsck_data->repo;\n    const char *store_id = repo->store_id;\n    int version = repo->version;\n    Seafile *seafile = NULL;\n\n    seafile = seaf_fs_manager_get_seafile (seaf->fs_mgr, store_id,\n                                           version, dent->id);\n\n    if (!seafile) {\n        seaf_warning (\"Failed to get seafile: %s/%s\\n\", store_id, dent->id);\n        return -1;\n    }\n\n    if (seafile->file_size == dent->size) {\n        goto out;\n    }\n\n    CheckFileSizeData data;\n    memset (&data, 0, sizeof(CheckFileSizeData));\n    data.repo = repo;\n    data.file_path = path;\n    data.file_id = dent->id;\n    data.commit_id = g_strdup (repo->head->commit_id);\n    data.visited_commits = g_hash_table_new_full (g_str_hash, g_str_equal,\n                                                   g_free, NULL);\n    data.truncate_time = fsck_data->truncate_time;\n    // Get the commit that added or modified this file.\n    seaf_commit_manager_traverse_commit_tree (seaf->commit_mgr,\n                                              repo->id, version,\n                                              repo->head->commit_id,\n                                              get_file_updated_commit,\n                                              &data,\n                                              FALSE);\n    SeafCommit *commit = NULL;\n    if (data.found) {\n        commit = seaf_commit_manager_get_commit (seaf->commit_mgr, repo->id,\n                                                 repo->version, data.commit_id);\n    }\n    if (commit) {\n        char time_buf[64];\n      
  strftime (time_buf, 64, \"%Y-%m-%d %H:%M:%S\", localtime((time_t *)&commit->ctime));\n        seaf_warning (\"Repo[%s] file %s is damaged, as its size does not match the expected value. It was uploaded via %s (commit id is %s), commit desc is %s, commit time is %s.\\n\",\n                      repo->id, path, commit->client_version, commit->commit_id, commit->desc, time_buf);\n    } else {\n        seaf_warning (\"Repo[%s] file %s is damaged, as its size does not match the expected value.\\n\",\n                      repo->id, path);\n    }\n\n    if (data.commit_id)\n        g_free (data.commit_id);\n    g_hash_table_destroy (data.visited_commits);\n\n    if (commit)\n        seaf_commit_unref (commit);\nout:\n\n    seafile_unref (seafile);\n    return ret;\n}\n\nstatic char*\nfsck_check_dir_recursive (const char *id, const char *parent_dir, FsckData *fsck_data)\n{\n    SeafDir *dir;\n    SeafDir *new_dir;\n    GList *p;\n    SeafDirent *seaf_dent;\n    char *dir_id = NULL;\n    char *path = NULL;\n    gboolean io_error = FALSE;\n\n    SeafFSManager *mgr = seaf->fs_mgr;\n    char *store_id = fsck_data->repo->store_id;\n    int version = fsck_data->repo->version;\n    gboolean is_corrupted = FALSE;\n\n    dir = seaf_fs_manager_get_seafdir (mgr, store_id, version, id);\n    if (!dir) {\n        goto out;\n    }\n\n    for (p = dir->entries; p; p = p->next) {\n        seaf_dent = p->data;\n        io_error = FALSE;\n\n        if (S_ISREG(seaf_dent->mode)) {\n            path = g_strdup_printf (\"%s%s\", parent_dir, seaf_dent->name);\n            if (!path) {\n                seaf_warning (\"Out of memory, stop to run fsck for repo %.8s.\\n\",\n                              fsck_data->repo->id);\n                goto out;\n            }\n            if (!fsck_verify_seafobj (store_id, version,\n                                      seaf_dent->id, &io_error,\n                                      VERIFY_FILE, fsck_data->options->repair)) {\n                if 
(io_error) {\n                    g_free (path);\n                    goto out;\n                }\n                is_corrupted = TRUE;\n                if (fsck_data->options->repair) {\n                    seaf_message (\"Repo[%.8s] file %s(%.8s) is damaged, recreate an empty file.\\n\",\n                                  fsck_data->repo->id, path, seaf_dent->id);\n                } else {\n                    seaf_message (\"Repo[%.8s] file %s(%.8s) is damaged.\\n\",\n                                  fsck_data->repo->id, path, seaf_dent->id);\n                }\n                // file damaged, set it empty\n                memcpy (seaf_dent->id, EMPTY_SHA1, 40);\n                seaf_dent->mtime = (gint64)time(NULL);\n                seaf_dent->size = 0;\n\n                fsck_data->repaired_files = g_list_prepend (fsck_data->repaired_files,\n                                                            g_strdup(path));\n            } else {\n                if (check_blocks (seaf_dent->id, fsck_data, &io_error) < 0) {\n                    if (io_error) {\n                        seaf_message (\"Failed to check blocks for repo[%.8s] file %s(%.8s).\\n\",\n                                      fsck_data->repo->id, path, seaf_dent->id);\n                        g_free (path);\n                        goto out;\n                    }\n                    is_corrupted = TRUE;\n                    if (fsck_data->options->repair) {\n                        seaf_message (\"Repo[%.8s] file %s(%.8s) is damaged, recreate an empty file.\\n\",\n                                      fsck_data->repo->id, path, seaf_dent->id);\n                    } else {\n                        seaf_message (\"Repo[%.8s] file %s(%.8s) is damaged.\\n\",\n                                      fsck_data->repo->id, path, seaf_dent->id);\n                    }\n                    // file damaged, set it empty\n                    memcpy (seaf_dent->id, EMPTY_SHA1, 40);\n                    
seaf_dent->mtime = (gint64)time(NULL);\n                    seaf_dent->size = 0;\n\n                    fsck_data->repaired_files = g_list_prepend (fsck_data->repaired_files,\n                                                                g_strdup(path));\n                } else if (fsck_data->options->check_file_size) {\n                    check_file_size (fsck_data, seaf_dent, path);\n                }\n            }\n\n            g_free (path);\n        } else if (S_ISDIR(seaf_dent->mode)) {\n            path = g_strdup_printf (\"%s%s/\", parent_dir, seaf_dent->name);\n            if (!path) {\n                seaf_warning (\"Out of memory, stop to run fsck for repo [%.8s].\\n\",\n                              fsck_data->repo->id);\n                goto out;\n            }\n            if (!fsck_verify_seafobj (store_id, version,\n                                      seaf_dent->id, &io_error,\n                                      VERIFY_DIR, fsck_data->options->repair)) {\n                if (io_error) {\n                    g_free (path);\n                    goto out;\n                }\n                if (fsck_data->options->repair) {\n                    seaf_message (\"Repo[%.8s] dir %s(%.8s) is damaged, recreate an empty dir.\\n\",\n                                  fsck_data->repo->id, path, seaf_dent->id);\n                } else {\n                    seaf_message (\"Repo[%.8s] dir %s(%.8s) is damaged.\\n\",\n                                  fsck_data->repo->id, path, seaf_dent->id);\n                }\n                is_corrupted = TRUE;\n                // dir damaged, set it empty\n                memcpy (seaf_dent->id, EMPTY_SHA1, 40);\n\n                fsck_data->repaired_folders = g_list_prepend (fsck_data->repaired_folders,\n                                                              g_strdup(path));\n            } else {\n                char *sub_dir_id = fsck_check_dir_recursive (seaf_dent->id, path, fsck_data);\n                if 
(sub_dir_id == NULL) {\n                    // IO error\n                    g_free (path);\n                    goto out;\n                }\n                if (strcmp (sub_dir_id, seaf_dent->id) != 0) {\n                    is_corrupted = TRUE;\n                    // dir damaged, set it to new dir_id\n                    memcpy (seaf_dent->id, sub_dir_id, 41);\n                }\n                g_free (sub_dir_id);\n            }\n            g_free (path);\n        }\n    }\n\n    if (is_corrupted) {\n        new_dir = seaf_dir_new (NULL, dir->entries, version);\n        if (fsck_data->options->repair) {\n            if (seaf_dir_save (mgr, store_id, version, new_dir) < 0) {\n                seaf_warning (\"Repo[%.8s] failed to save dir\\n\", fsck_data->repo->id);\n                seaf_dir_free (new_dir);\n                // dir->entries was taken by new_dir, which has been freed.\n                dir->entries = NULL;\n                goto out;\n            }\n        }\n        dir_id = g_strdup (new_dir->dir_id);\n        seaf_dir_free (new_dir);\n        dir->entries = NULL;\n    } else {\n        dir_id = g_strdup (dir->dir_id);\n    }\n\nout:\n    seaf_dir_free (dir);\n\n    return dir_id;\n}\n\nstatic char *\ngen_repair_commit_desc (GList *repaired_files, GList *repaired_folders)\n{\n    GString *desc = g_string_new(\"Repaired by system.\");\n    GList *p;\n    char *path;\n\n    if (!repaired_files && !repaired_folders)\n        return g_string_free (desc, FALSE);\n\n    if (repaired_files) {\n        g_string_append (desc, \"\\nDamaged files:\\n\");\n        for (p = repaired_files; p; p = p->next) {\n            path = p->data;\n            g_string_append_printf (desc, \"%s\\n\", path);\n        }\n    }\n\n    if (repaired_folders) {\n        g_string_append (desc, \"\\nDamaged folders:\\n\");\n        for (p = repaired_folders; p; p = p->next) {\n            path = p->data;\n            g_string_append_printf (desc, \"%s\\n\", path);\n        }\n  
  }\n\n    return g_string_free (desc, FALSE);\n}\n\nstatic void\nreset_commit_to_repair (SeafRepo *repo, SeafCommit *parent, char *new_root_id,\n                        GList *repaired_files, GList *repaired_folders)\n{\n    if (seaf_delete_repo_tokens (repo) < 0) {\n        seaf_warning (\"Failed to delete repo sync tokens, abort repair.\\n\");\n        return;\n    }\n\n    char *desc = gen_repair_commit_desc (repaired_files, repaired_folders);\n\n    SeafCommit *new_commit = NULL;\n    new_commit = seaf_commit_new (NULL, repo->id, new_root_id,\n                                  parent->creator_name, parent->creator_id,\n                                  desc, 0);\n    g_free (desc);\n    if (!new_commit) {\n        seaf_warning (\"Out of memory, stop to run fsck for repo %.8s.\\n\",\n                      repo->id);\n        return;\n    }\n\n    new_commit->parent_id = g_strdup (parent->commit_id);\n    seaf_repo_to_commit (repo, new_commit);\n\n    seaf_message (\"Update repo %.8s status to commit %.8s.\\n\",\n                  repo->id, new_commit->commit_id);\n    seaf_branch_set_commit (repo->head, new_commit->commit_id);\n    if (seaf_branch_manager_add_branch (seaf->branch_mgr, repo->head) < 0) {\n        seaf_warning (\"Update head of repo %.8s to commit %.8s failed, \"\n                      \"recover failed.\\n\", repo->id, new_commit->commit_id);\n    } else {\n        seaf_commit_manager_add_commit (seaf->commit_mgr, new_commit);\n    }\n    seaf_commit_unref (new_commit);\n}\n\n/*\n * check and recover repo, for damaged file or folder set it empty\n */\nstatic void\ncheck_and_recover_repo (SeafRepo *repo, gboolean reset, FsckOptions *options)\n{\n    FsckData fsck_data;\n    SeafCommit *rep_commit = NULL;\n    char *root_id = NULL;\n\n    seaf_message (\"Checking file system integrity of repo %s(%.8s)...\\n\",\n                  repo->name, repo->id);\n\n    rep_commit = seaf_commit_manager_get_commit (seaf->commit_mgr, repo->id,\n                  
                               repo->version, repo->head->commit_id);\n    if (!rep_commit) {\n        seaf_warning (\"Failed to load commit %s of repo %s\\n\",\n                      repo->head->commit_id, repo->id);\n        return;\n    }\n\n    memset (&fsck_data, 0, sizeof(fsck_data));\n    fsck_data.options = options;\n    fsck_data.repo = repo;\n    fsck_data.existing_blocks = g_hash_table_new_full (g_str_hash, g_str_equal,\n                                                       g_free, NULL);\n    if (options->check_file_size) {\n        fsck_data.truncate_time = seaf_repo_manager_get_repo_truncate_time (seaf->repo_mgr,\n                                                                            repo->id);\n    }\n\n    root_id = fsck_check_dir_recursive (rep_commit->root_id, \"/\", &fsck_data);\n    g_hash_table_destroy (fsck_data.existing_blocks);\n    if (root_id == NULL) {\n        goto out;\n    }\n\n    if (options->repair) {\n        if (strcmp (root_id, rep_commit->root_id) != 0) {\n            // some fs objects damaged for the head commit,\n            // create new head commit using the new root_id\n            reset_commit_to_repair (repo, rep_commit, root_id,\n                                    fsck_data.repaired_files,\n                                    fsck_data.repaired_folders);\n        } else if (reset) {\n            // for reset commit but fs objects not damaged, also create a repaired commit\n            reset_commit_to_repair (repo, rep_commit, rep_commit->root_id,\n                                    NULL, NULL);\n        }\n    }\n\nout:\n    g_list_free_full (fsck_data.repaired_files, g_free);\n    g_list_free_full (fsck_data.repaired_folders, g_free);\n    g_free (root_id);\n    seaf_commit_unref (rep_commit);\n}\n\nstatic gint\ncompare_commit_by_ctime (gconstpointer a, gconstpointer b)\n{\n    const SeafCommit *commit_a = a;\n    const SeafCommit *commit_b = b;\n\n    return (commit_b->ctime - commit_a->ctime);\n}\n\nstatic 
gboolean\nfsck_get_repo_commit (const char *repo_id, int version,\n                      const char *obj_id, void *commit_list)\n{\n    void *data = NULL;\n    int data_len;\n    GList **cur_list = (GList **)commit_list;\n\n    int ret = seaf_obj_store_read_obj (seaf->commit_mgr->obj_store, repo_id,\n                                       version, obj_id, &data, &data_len);\n    if (ret < 0 || data == NULL)\n        return TRUE;\n\n    SeafCommit *cur_commit = seaf_commit_from_data (obj_id, data, data_len);\n    if (cur_commit != NULL) {\n       *cur_list = g_list_prepend (*cur_list, cur_commit);\n    }\n\n    g_free(data);\n    return TRUE;\n}\n\nstatic SeafRepo*\nget_available_repo (char *repo_id, gboolean repair)\n{\n    GList *commit_list = NULL;\n    GList *temp_list = NULL;\n    SeafCommit *temp_commit = NULL;\n    SeafBranch *branch = NULL;\n    SeafRepo *repo = NULL;\n    SeafVirtRepo *vinfo = NULL;\n    gboolean io_error;\n\n    seaf_message (\"Scanning available commits...\\n\");\n\n    seaf_obj_store_foreach_obj (seaf->commit_mgr->obj_store, repo_id,\n                                1, fsck_get_repo_commit, &commit_list);\n\n    if (commit_list == NULL) {\n        seaf_warning (\"No available commits for repo %.8s, can't be repaired.\\n\",\n                      repo_id);\n        return NULL;\n    }\n\n    commit_list = g_list_sort (commit_list, compare_commit_by_ctime);\n\n    repo = seaf_repo_new (repo_id, NULL, NULL);\n    if (repo == NULL) {\n        seaf_warning (\"Out of memory, stop to run fsck for repo %.8s.\\n\",\n                      repo_id);\n        goto out;\n    }\n\n    vinfo = seaf_repo_manager_get_virtual_repo_info (seaf->repo_mgr, repo_id);\n    if (vinfo) {\n        repo->is_virtual = TRUE;\n        memcpy (repo->store_id, vinfo->origin_repo_id, 36);\n        seaf_virtual_repo_info_free (vinfo);\n    } else {\n        repo->is_virtual = FALSE;\n        memcpy (repo->store_id, repo->id, 36);\n    }\n\n    for (temp_list = 
commit_list; temp_list; temp_list = temp_list->next) {\n        temp_commit = temp_list->data;\n        io_error = FALSE;\n\n        if (!fsck_verify_seafobj (repo->store_id, 1, temp_commit->root_id,\n                                  &io_error, VERIFY_DIR, repair)) {\n            if (io_error) {\n                seaf_repo_unref (repo);\n                repo = NULL;\n                goto out;\n            }\n            // fs object of this commit is damaged,\n            // continue to verify next\n            continue;\n        }\n\n        branch = seaf_branch_new (\"master\", repo_id, temp_commit->commit_id);\n        if (branch == NULL) {\n            seaf_warning (\"Out of memory, stop to run fsck for repo %.8s.\\n\",\n                          repo_id);\n            seaf_repo_unref (repo);\n            repo = NULL;\n            goto out;\n        }\n        repo->head = branch;\n        seaf_repo_from_commit (repo, temp_commit);\n\n        char time_buf[64];\n        strftime (time_buf, 64, \"%Y-%m-%d %H:%M:%S\", localtime((time_t *)&temp_commit->ctime));\n        seaf_message (\"Find available commit %.8s(created at %s) for repo %.8s.\\n\",\n                      temp_commit->commit_id, time_buf, repo_id);\n        break;\n    }\n\nout:\n    for (temp_list = commit_list; temp_list; temp_list = temp_list->next) {\n        temp_commit = temp_list->data;\n        seaf_commit_unref (temp_commit);\n    }\n    g_list_free (commit_list);\n\n    if (!repo || !repo->head) {\n        seaf_warning(\"No available commits found for repo %.8s, can't be repaired.\\n\",\n                     repo_id);\n        seaf_repo_unref (repo);\n        return NULL;\n    }\n\n    return repo;\n}\n\nstatic void\nrepair_repo(char *repo_id, FsckOptions *options)\n{\n    gboolean exists;\n    gboolean reset = FALSE;\n    SeafRepo *repo;\n    gboolean io_error;\n\n    seaf_message (\"Running fsck for repo %s.\\n\", repo_id);\n\n        if (!is_uuid_valid (repo_id)) {\n            
seaf_warning (\"Invalid repo id %s.\\n\", repo_id);\n            goto next;\n        }\n\n        exists = seaf_repo_manager_repo_exists (seaf->repo_mgr, repo_id);\n        if (!exists) {\n            seaf_warning (\"Repo %.8s doesn't exist.\\n\", repo_id);\n            goto next;\n        }\n\n        repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);\n\n        if (!repo) {\n            seaf_message (\"Repo %.8s HEAD commit is damaged, \"\n                          \"need to restore to an old version.\\n\", repo_id);\n            repo = get_available_repo (repo_id, options->repair);\n            if (!repo) {\n                goto next;\n            }\n            reset = TRUE;\n        } else {\n            SeafCommit *commit = seaf_commit_manager_get_commit (seaf->commit_mgr, repo->id,\n                                                                 repo->version,\n                                                                 repo->head->commit_id);\n            if (!commit) {\n                seaf_warning (\"Failed to get head commit %s of repo %s\\n\",\n                              repo->head->commit_id, repo->id);\n                seaf_repo_unref (repo);\n                goto next;\n            }\n\n            io_error = FALSE;\n            if (!fsck_verify_seafobj (repo->store_id, repo->version,\n                                      commit->root_id,  &io_error,\n                                      VERIFY_DIR, options->repair)) {\n                if (io_error) {\n                    seaf_commit_unref (commit);\n                    seaf_repo_unref (repo);\n                    goto next;\n                } else {\n                    // root fs object is damaged, get available commit\n                    seaf_message (\"Repo %.8s HEAD commit is damaged, \"\n                                  \"need to restore to an old version.\\n\", repo_id);\n                    seaf_commit_unref (commit);\n                    seaf_repo_unref (repo);\n       
             repo = get_available_repo (repo_id, options->repair);\n                    if (!repo) {\n                        goto next;\n                    }\n                    reset = TRUE;\n                }\n            } else {\n                // head commit is available\n                seaf_commit_unref (commit);\n            }\n        }\n\n        check_and_recover_repo (repo, reset, options);\n\n        seaf_repo_unref (repo);\nnext:\n        seaf_message (\"Fsck finished for repo %.8s.\\n\\n\", repo_id);\n}\n\nstatic void\nrepair_repo_with_thread_pool(gpointer data, gpointer user_data)\n{\n    CheckAndRecoverRepoObj *obj = data;\n\n    repair_repo(obj->repo_id, obj->options);\n\n    g_free(obj);\n}\n\nstatic void\nrepair_repos (GList *repo_id_list, FsckOptions *options)\n{\n    GList *ptr;\n    char *repo_id;\n    GThreadPool *pool;\n\n    if (options->max_thread_num) {\n        pool = g_thread_pool_new(\n            (GFunc)repair_repo_with_thread_pool, NULL, options->max_thread_num, FALSE, NULL);\n        if (!pool) {\n            seaf_warning (\"Failed to create check and recover repo thread pool.\\n\");\n            return;\n        }\n    }\n\n    for (ptr = repo_id_list; ptr; ptr = ptr->next) {\n        repo_id = ptr->data;\n\n        if (options->max_thread_num) {\n            CheckAndRecoverRepoObj *obj = g_new0(CheckAndRecoverRepoObj, 1);\n            obj->repo_id = repo_id;\n            obj->options = options;\n            g_thread_pool_push(pool, obj, NULL);\n        } else {\n            repair_repo(repo_id, options);\n        }\n     }\n\n    if (options->max_thread_num) {\n        g_thread_pool_free(pool, FALSE, TRUE);\n    }\n}\n\nint\nseaf_fsck (GList *repo_id_list, FsckOptions *options)\n{\n    if (!repo_id_list)\n        repo_id_list = seaf_repo_manager_get_repo_id_list (seaf->repo_mgr);\n\n    repair_repos (repo_id_list, options);\n\n    while (repo_id_list) {\n        g_free (repo_id_list->data);\n        repo_id_list = 
g_list_delete_link (repo_id_list, repo_id_list);\n    }\n\n    return 0;\n}\n\n/* Export files. */\n\n/*static gboolean\nwrite_enc_block_to_file (const char *repo_id,\n                         int version,\n                         const char *block_id,\n                         SeafileCrypt *crypt,\n                         int fd,\n                         const char *path)\n{\n    BlockHandle *handle;\n    BlockMetadata *bmd;\n    char buf[64 * 1024];\n    int n;\n    int remain;\n    EVP_CIPHER_CTX ctx;\n    char *dec_out;\n    int dec_out_len;\n    gboolean ret = TRUE;\n\n    bmd = seaf_block_manager_stat_block (seaf->block_mgr,\n                                         repo_id, version,\n                                         block_id);\n    if (!bmd) {\n        seaf_warning (\"Failed to stat block %s.\\n\", block_id);\n        return FALSE;\n    }\n\n    handle = seaf_block_manager_open_block (seaf->block_mgr,\n                                            repo_id, version,\n                                            block_id, BLOCK_READ);\n    if (!handle) {\n        seaf_warning (\"Failed to open block %s.\\n\", block_id);\n        g_free (bmd);\n        return FALSE;\n    }\n\n    if (seafile_decrypt_init (&ctx, crypt->version,\n                              crypt->key, crypt->iv) < 0) {\n        seaf_warning (\"Failed to init decrypt.\\n\");\n        ret = FALSE;\n        goto out;\n    }\n\n    remain = bmd->size;\n    while (1) {\n        n = seaf_block_manager_read_block (seaf->block_mgr, handle, buf, sizeof(buf));\n        if (n < 0) {\n            seaf_warning (\"Failed to read block %s.\\n\", block_id);\n            ret = FALSE;\n            break;\n        } else if (n == 0) {\n            break;\n        }\n        remain -= n;\n\n        dec_out = g_new0 (char, n + 16);\n        if (!dec_out) {\n            seaf_warning (\"Failed to alloc memory.\\n\");\n            ret = FALSE;\n            break;\n        }\n\n        if (EVP_DecryptUpdate 
(&ctx,\n                               (unsigned char *)dec_out,\n                               &dec_out_len,\n                               (unsigned char *)buf,\n                               n) == 0) {\n            seaf_warning (\"Failed to decrypt block %s .\\n\", block_id);\n            g_free (dec_out);\n            ret = FALSE;\n            break;\n        }\n\n        if (writen (fd, dec_out, dec_out_len) != dec_out_len) {\n            seaf_warning (\"Failed to write block %s to file %s.\\n\",\n                          block_id, path);\n            g_free (dec_out);\n            ret = FALSE;\n            break;\n        }\n\n        if (remain == 0) {\n            if (EVP_DecryptFinal_ex (&ctx,\n                                     (unsigned char *)dec_out,\n                                     &dec_out_len) == 0) {\n                seaf_warning (\"Failed to decrypt block %s .\\n\", block_id);\n                g_free (dec_out);\n                ret = FALSE;\n                break;\n            }\n            if (dec_out_len > 0) {\n                if (writen (fd, dec_out, dec_out_len) != dec_out_len) {\n                    seaf_warning (\"Failed to write block %s to file %s.\\n\",\n                                  block_id, path);\n                    g_free (dec_out);\n                    ret = FALSE;\n                    break;\n                }\n            }\n        }\n\n        g_free (dec_out);\n    }\n\n    EVP_CIPHER_CTX_cleanup (&ctx);\n\nout:\n    g_free (bmd);\n    seaf_block_manager_close_block (seaf->block_mgr, handle);\n    seaf_block_manager_block_handle_free (seaf->block_mgr, handle);\n\n    return ret;\n}*/\n\nstatic gboolean\nwrite_nonenc_block_to_file (const char *repo_id,\n                            int version,\n                            const char *block_id,\n                            const gint64 mtime,\n                            int fd,\n                            const char *path)\n{\n    BlockHandle *handle;\n    
char buf[64 * 1024];\n    gboolean ret = TRUE;\n    int n;\n\n    handle = seaf_block_manager_open_block (seaf->block_mgr,\n                                            repo_id, version,\n                                            block_id, BLOCK_READ);\n    if (!handle) {\n        return FALSE;\n    }\n\n    while (1) {\n        n = seaf_block_manager_read_block (seaf->block_mgr, handle, buf, sizeof(buf));\n        if (n < 0) {\n            seaf_warning (\"Failed to read block %s.\\n\", block_id);\n            ret = FALSE;\n            break;\n        } else if (n == 0) {\n            break;\n        }\n\n        if (writen (fd, buf, n) != n) {\n            seaf_warning (\"Failed to write block %s to file %s.\\n\",\n                          block_id, path);\n            ret = FALSE;\n            break;\n        }\n    }\n\n    struct utimbuf timebuf;\n\n    timebuf.modtime = mtime;\n    timebuf.actime = mtime;\n\n    if(utime(path, &timebuf) == -1) {\n      seaf_warning (\"Current file (%s) loses its mtime.\\n\", path);\n    }\n\n    seaf_block_manager_close_block (seaf->block_mgr, handle);\n    seaf_block_manager_block_handle_free (seaf->block_mgr, handle);\n\n    return ret;\n}\n\nstatic void\ncreate_file (const char *repo_id,\n             const char *file_id,\n             const gint64 mtime,\n             const char *path)\n{\n    int i;\n    char *block_id;\n    int fd;\n    Seafile *seafile;\n    gboolean ret = TRUE;\n    int version = 1;\n\n    fd = g_open (path, O_CREAT | O_WRONLY | O_BINARY, 0666);\n    if (fd < 0) {\n        seaf_warning (\"Open file %s failed: %s.\\n\", path, strerror (errno));\n        return;\n    }\n\n    seafile = seaf_fs_manager_get_seafile (seaf->fs_mgr, repo_id,\n                                           version, file_id);\n    if (!seafile) {\n        ret = FALSE;\n        goto out;\n    }\n\n    for (i = 0; i < seafile->n_blocks; ++i) {\n        block_id = seafile->blk_sha1s[i];\n\n        ret = write_nonenc_block_to_file 
(repo_id, version, block_id, mtime,\n                                          fd, path);\n        if (!ret) {\n            break;\n        }\n    }\n\nout:\n    close (fd);\n    if (!ret) {\n        if (g_unlink (path) < 0) {\n            seaf_warning (\"Failed to delete file %s: %s.\\n\", path, strerror (errno));\n        }\n        seaf_message (\"Failed to export file %s.\\n\", path);\n    } else {\n        seaf_message (\"Export file %s.\\n\", path);\n    }\n    seafile_unref (seafile);\n}\n\nstatic void\nexport_repo_files_recursive (const char *repo_id,\n                             const char *id,\n                             const char *parent_dir)\n{\n    SeafDir *dir;\n    GList *p;\n    SeafDirent *seaf_dent;\n    char *path;\n\n    SeafFSManager *mgr = seaf->fs_mgr;\n    int version = 1;\n\n    dir = seaf_fs_manager_get_seafdir (mgr, repo_id, version, id);\n    if (!dir) {\n        return;\n    }\n\n    for (p = dir->entries; p; p = p->next) {\n        seaf_dent = p->data;\n        path = g_build_filename (parent_dir, seaf_dent->name, NULL);\n\n        if (S_ISREG(seaf_dent->mode)) {\n            // create file\n            create_file (repo_id, seaf_dent->id, seaf_dent->mtime, path);\n        } else if (S_ISDIR(seaf_dent->mode)) {\n            if (g_mkdir (path, 0777) < 0) {\n                seaf_warning (\"Failed to mkdir %s: %s.\\n\", path,\n                              strerror (errno));\n                g_free (path);\n                continue;\n            } else {\n                seaf_message (\"Export dir %s.\\n\", path);\n            }\n\n            export_repo_files_recursive (repo_id, seaf_dent->id, path);\n        }\n        g_free (path);\n    }\n\n    seaf_dir_free (dir);\n}\n\nstatic SeafCommit*\nget_available_commit (const char *repo_id)\n{\n    GList *commit_list = NULL;\n    GList *temp_list = NULL;\n    GList *next_list = NULL;\n    SeafCommit *temp_commit = NULL;\n    gboolean io_error;\n\n    seaf_message (\"Scanning available 
commits for repo %s...\\n\", repo_id);\n\n    seaf_obj_store_foreach_obj (seaf->commit_mgr->obj_store, repo_id,\n                                1, fsck_get_repo_commit, &commit_list);\n\n    if (commit_list == NULL) {\n        seaf_warning (\"No available commits for repo %.8s, export failed.\\n\\n\",\n                      repo_id);\n        return NULL;\n    }\n\n    commit_list = g_list_sort (commit_list, compare_commit_by_ctime);\n    temp_list = commit_list;\n    while (temp_list) {\n        next_list = temp_list->next;\n        temp_commit = temp_list->data;\n        io_error = FALSE;\n\n        if (memcmp (temp_commit->root_id, EMPTY_SHA1, 40) == 0) {\n            seaf_commit_unref (temp_commit);\n            temp_commit = NULL;\n            temp_list = next_list;\n            continue;\n        } else if (!fsck_verify_seafobj (repo_id, 1, temp_commit->root_id,\n                                         &io_error, VERIFY_DIR, FALSE)) {\n            seaf_commit_unref (temp_commit);\n            temp_commit = NULL;\n            temp_list = next_list;\n\n            if (io_error) {\n                break;\n            }\n            // fs object of this commit is damaged,\n            // continue to verify next\n            continue;\n        }\n\n        char time_buf[64];\n        strftime (time_buf, 64, \"%Y-%m-%d %H:%M:%S\", localtime((time_t *)&temp_commit->ctime));\n        seaf_message (\"Find available commit %.8s(created at %s), will export files from it.\\n\",\n                      temp_commit->commit_id, time_buf);\n        temp_list = next_list;\n        break;\n    }\n\n    while (temp_list) {\n        seaf_commit_unref (temp_list->data);\n        temp_list = temp_list->next;\n    }\n    g_list_free (commit_list);\n\n    if (!temp_commit && !io_error) {\n        seaf_warning (\"No available commits for repo %.8s, export failed.\\n\\n\",\n                      repo_id);\n    }\n\n    return temp_commit;\n}\n\nvoid\nexport_repo_files (const char 
*repo_id,\n                   const char *init_path,\n                   GHashTable *enc_repos)\n{\n    SeafCommit *commit = get_available_commit (repo_id);\n    if (!commit) {\n        return;\n    }\n    if (commit->encrypted) {\n        g_hash_table_insert (enc_repos, g_strdup (repo_id),\n                             g_strdup (commit->repo_name));\n        seaf_commit_unref (commit);\n        return;\n    }\n\n    seaf_message (\"Start to export files for repo %.8s(%s).\\n\",\n                  repo_id, commit->repo_name);\n\n    char *dir_name = g_strdup_printf (\"%.8s_%s_%s\", repo_id,\n                                      commit->repo_name,\n                                      commit->creator_name);\n    char * export_path = g_build_filename (init_path, dir_name, NULL);\n    g_free (dir_name);\n    if (g_mkdir (export_path, 0777) < 0) {\n        seaf_warning (\"Failed to create export dir %s: %s, export failed.\\n\",\n                      export_path, strerror (errno));\n        g_free (export_path);\n        seaf_commit_unref (commit);\n        return;\n    }\n\n    export_repo_files_recursive (repo_id, commit->root_id, export_path);\n\n    seaf_message (\"Finish exporting files for repo %.8s.\\n\\n\", repo_id);\n\n    g_free (export_path);\n    seaf_commit_unref (commit);\n}\n\nstatic GList *\nget_repo_ids (const char *seafile_dir)\n{\n    GList *repo_ids = NULL;\n    char *commit_path = g_build_filename (seafile_dir, \"storage\",\n                                          \"commits\", NULL);\n    GError *error = NULL;\n\n    GDir *dir = g_dir_open (commit_path, 0, &error);\n    if (!dir) {\n        seaf_warning (\"Open dir %s failed: %s.\\n\",\n                      commit_path, error->message);\n        g_clear_error (&error);\n        g_free (commit_path);\n        return NULL;\n    }\n\n    const char *file_name;\n    while ((file_name = g_dir_read_name (dir)) != NULL) {\n        repo_ids = g_list_prepend (repo_ids, g_strdup (file_name));\n    }\n   
 g_dir_close (dir);\n\n    g_free (commit_path);\n\n    return repo_ids;\n}\n\nstatic void\nprint_enc_repo (gpointer key, gpointer value, gpointer user_data)\n{\n    seaf_message (\"%s(%s)\\n\", (char *)key, (char *)value);\n}\n\nvoid\nexport_file (GList *repo_id_list, const char *seafile_dir, char *export_path)\n{\n    struct stat dir_st;\n\n    if (stat (export_path, &dir_st) < 0) {\n        if (errno == ENOENT) {\n            if (g_mkdir (export_path, 0777) < 0) {\n                seaf_warning (\"Mkdir %s failed: %s.\\n\",\n                              export_path, strerror (errno));\n                return;\n            }\n        } else {\n            seaf_warning (\"Stat path: %s failed: %s.\\n\",\n                          export_path, strerror (errno));\n            return;\n        }\n    } else {\n        if (!S_ISDIR(dir_st.st_mode)) {\n            seaf_warning (\"%s already exist, but it is not a directory.\\n\",\n                          export_path);\n            return;\n        }\n    }\n\n    if (!repo_id_list) {\n        repo_id_list = get_repo_ids (seafile_dir);\n        if (!repo_id_list)\n            return;\n    }\n\n    GList *iter = repo_id_list;\n    char *repo_id;\n    GHashTable *enc_repos = g_hash_table_new_full (g_str_hash, g_str_equal,\n                                                   g_free, g_free);\n\n    for (; iter; iter=iter->next) {\n        repo_id = iter->data;\n        if (!is_uuid_valid (repo_id)) {\n            seaf_warning (\"Invalid repo id: %s.\\n\", repo_id);\n            continue;\n        }\n\n        export_repo_files (repo_id, export_path, enc_repos);\n    }\n\n    if (g_hash_table_size (enc_repos) > 0) {\n        seaf_message (\"The following repos are encrypted and are not exported:\\n\");\n        g_hash_table_foreach (enc_repos, print_enc_repo, NULL);\n    }\n\n    while (repo_id_list) {\n        g_free (repo_id_list->data);\n        repo_id_list = g_list_delete_link (repo_id_list, repo_id_list);\n    }\n    
g_hash_table_destroy (enc_repos);\n    g_free (export_path);\n}\n"
  },
  {
    "path": "server/gc/fsck.h",
    "content": "#ifndef SEAF_FSCK_H\n#define SEAF_FSCK_H\n\ntypedef struct FsckOptions {\n    int max_thread_num;\n    gboolean check_integrity;\n    gboolean check_file_size;\n    gboolean repair;\n} FsckOptions;\n\nint\nseaf_fsck (GList *repo_id_list, FsckOptions *options);\n\nvoid export_file (GList *repo_id_list, const char *seafile_dir, char *export_path);\n\n#endif\n"
  },
  {
    "path": "server/gc/gc-core.c",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#include \"common.h\"\n\n#include \"seafile-session.h\"\n#include \"bloom-filter.h\"\n#include \"gc-core.h\"\n#include \"utils.h\"\n\n#define DEBUG_FLAG SEAFILE_DEBUG_OTHER\n#include \"log.h\"\n\n#include <time.h>\n#define MAX_BF_SIZE (((size_t)1) << 29)   /* 64 MB */\n\n#define KEEP_ALIVE_PER_OBJS 100\n#define KEEP_ALIVE_PER_SECOND 1\n\n/*\n * The number of bits in the bloom filter is 4 times the number of all blocks.\n * Let m be the bits in the bf, n be the number of blocks to be added to the bf\n * (the number of live blocks), and k = 3 (closed to optimal for m/n = 4),\n * the probability of false-positive is\n *\n *     p = (1 - e^(-kn/m))^k = 0.15\n *\n * Because m = 4 * total_blocks >= 4 * (live blocks) = 4n, we should have p <= 0.15.\n * Put it another way, we'll clean up at least 85% dead blocks in each gc operation.\n * See http://en.wikipedia.org/wiki/Bloom_filter.\n *\n * Supose we have 8TB space, and the avg block size is 1MB, we'll have 8M blocks, then\n * the size of bf is (8M * 4)/8 = 4MB.\n *\n * If total_blocks is a small number (e.g. < 100), we should try to clean all dead blocks.\n * So we set the minimal size of the bf to 1KB.\n */\n\n/*\n * Online GC algorithm\n *\n * There is a table `GCID` in the seafile database. Every time GC is run for a repo,\n * a new GC ID (UUID) will be generated and inserted into this table.\n * \n * Other threads that want to update the branch head of a repo must do so as follows:\n * 1. Read the GC ID from the table before wrting blocks;\n * 2. begin a transaction;\n * 3. Read the GC ID again with `SELECT ... FOR UPDATE`;\n * 4. Compare the new GC ID with the previous one. If they are the same, proceed to\n *    update the branch head; otherwise, a GC operation has been run between\n *    steps 1 and 3, the branch update operation must be failed.\n * 5. 
Commit or rollback the transaction.\n *\n * For syncing clients, the algorithm is a bit more complicated.\n * Because writing blocks and updating the branch head is not executed in the same\n * context (or more precisely, not in the same thread), the GC ID read in step 1\n * has to be stored into a database table `LastGCID (client_token, gc_id)`.\n * After step 4, no matter the branch update succeeds or not, the entry in `LastGCID`\n * table has to be deleted.\n */\n\nstatic Bloom *\nalloc_gc_index (const char *repo_id, guint64 total_blocks)\n{\n    size_t size;\n\n    size = (size_t) MAX(total_blocks << 2, 1 << 13);\n    size = MIN (size, MAX_BF_SIZE);\n\n    seaf_message (\"GC index size is %u Byte for repo %.8s.\\n\",\n                  (int)size >> 3, repo_id);\n\n    return bloom_create (size, 3, 0);\n}\n\ntypedef struct {\n    SeafRepo *repo;\n    Bloom *blocks_index;\n    Bloom *fs_index;\n    GHashTable *visited;\n    GHashTable *visited_commits;\n\n    /* > 0: keep a period of history;\n     * == 0: only keep data in head commit;\n     * < 0: keep all history data.\n     */\n    gint64 truncate_time;\n    gboolean traversed_head;\n\n    int traversed_commits;\n    gint64 traversed_blocks;\n\n    int verbose;\n    gint64 traversed_fs_objs;\n\n    SeafDBTrans *trans;\n    gint64 keep_alive_last_time;\n    gint64 keep_alive_obj_counter;\n\n    gboolean traverse_base_commit;\n} GCData;\n\nstatic int\nadd_blocks_to_index (SeafFSManager *mgr, GCData *data, const char *file_id)\n{\n    SeafRepo *repo = data->repo;\n    Bloom *blocks_index = data->blocks_index;\n    Seafile *seafile;\n    int i;\n\n    seafile = seaf_fs_manager_get_seafile (mgr, repo->store_id, repo->version, file_id);\n    if (!seafile) {\n        seaf_warning (\"Failed to find file %s:%s.\\n\", repo->store_id, file_id);\n        return -1;\n    }\n\n    for (i = 0; i < seafile->n_blocks; ++i) {\n        bloom_add (blocks_index, seafile->blk_sha1s[i]);\n        ++data->traversed_blocks;\n    
}\n\n    seafile_unref (seafile);\n\n    return 0;\n}\n\nstatic void\nadd_fs_to_index(GCData *data, const char *file_id)\n{\n    Bloom *fs_index = data->fs_index;\n    if (fs_index) {\n        bloom_add (fs_index, file_id);\n    }\n    ++(data->traversed_fs_objs);\n}\n\nstatic gboolean\nfs_callback (SeafFSManager *mgr,\n             const char *store_id,\n             int version,\n             const char *obj_id,\n             int type,\n             void *user_data,\n             gboolean *stop)\n{\n    GCData *data = user_data;\n\n    if (data->visited != NULL) {\n        if (g_hash_table_lookup (data->visited, obj_id) != NULL) {\n            *stop = TRUE;\n            return TRUE;\n        }\n\n        char *key = g_strdup(obj_id);\n        g_hash_table_replace (data->visited, key, key);\n    }\n\n    if (data->trans) {\n        ++(data->keep_alive_obj_counter);\n\n        if (data->keep_alive_obj_counter >= KEEP_ALIVE_PER_OBJS &&\n            ((gint64)time(NULL) - data->keep_alive_last_time) >= KEEP_ALIVE_PER_SECOND)\n        {\n            data->keep_alive_last_time = (gint64)time(NULL);\n            data->keep_alive_obj_counter = 0;\n            seaf_db_trans_query(data->trans, \"SELECT 1;\", 0);\n        }\n    }\n\n    add_fs_to_index(data, obj_id);\n\n    // If traversing the base_commit, only the fs objects need to be retained, while the block does not.\n    // This is because only the fs objects are needed when merging virtual repo.\n    if (data->traverse_base_commit) {\n        return TRUE;\n    }\n\n    if (type == SEAF_METADATA_TYPE_FILE &&\n        add_blocks_to_index (mgr, data, obj_id) < 0)\n        return FALSE;\n\n    return TRUE;\n}\n\nstatic gboolean\ntraverse_commit (SeafCommit *commit, void *vdata, gboolean *stop)\n{\n    GCData *data = vdata;\n    int ret;\n\n    if (g_hash_table_lookup (data->visited_commits, commit->commit_id)) {\n        // Has traversed on prev head commit, stop traverse from this branch\n        *stop = TRUE;\n        
return TRUE;\n    }\n\n    if (data->truncate_time == 0)\n    {\n        *stop = TRUE;\n        /* Stop after traversing the head commit. */\n    }\n    else if (data->truncate_time > 0 &&\n             (gint64)(commit->ctime) < data->truncate_time &&\n             data->traversed_head)\n    {\n        /* Still traverse the first commit older than truncate_time.\n         * If a file in the child commit of this commit is deleted,\n         * we need to access this commit in order to restore it\n         * from trash.\n         */\n        *stop = TRUE;\n    }\n\n    if (!data->traversed_head)\n        data->traversed_head = TRUE;\n\n    if (data->verbose)\n        seaf_message (\"Traversing commit %.8s for repo %.8s.\\n\",\n                      commit->commit_id, data->repo->id);\n\n    ++data->traversed_commits;\n\n    data->traversed_fs_objs = 0;\n\n    ret = seaf_fs_manager_traverse_tree (seaf->fs_mgr,\n                                         data->repo->store_id, data->repo->version,\n                                         commit->root_id,\n                                         fs_callback,\n                                         data, FALSE);\n    if (ret < 0)\n        return FALSE;\n\n    int dummy;\n    g_hash_table_replace (data->visited_commits,\n                          g_strdup (commit->commit_id), &dummy);\n\n    if (data->verbose)\n        seaf_message (\"Traversed %\"G_GINT64_FORMAT\" fs objects for repo %.8s.\\n\",\n                      data->traversed_fs_objs, data->repo->id);\n\n    return TRUE;\n}\n\nstatic int\nupdate_gc_id (SeafRepo *repo, SeafDBTrans *trans)\n{\n    char *sql;\n    char *gc_id;\n    gboolean id_exists, db_err = FALSE;\n    int ret;\n\n    sql = \"SELECT 1 FROM GCID WHERE repo_id = ?\";\n    id_exists = seaf_db_trans_check_for_existence (trans, sql, &db_err,\n                                                   1, \"string\", repo->id);\n\n    gc_id = gen_uuid ();\n    if (id_exists) {\n        sql = \"UPDATE GCID SET 
gc_id = ? WHERE repo_id = ?\";\n        ret = seaf_db_trans_query (trans, sql, 2,\n                                   \"string\", gc_id, \"string\", repo->id);\n    } else {\n        sql = \"INSERT INTO GCID (repo_id, gc_id) VALUES (?, ?)\";\n        ret = seaf_db_trans_query (trans, sql, 2,\n                                   \"string\", repo->id, \"string\", gc_id);\n    }\n    g_free (gc_id);\n\n    return ret;\n}\n\nstatic void\nupdate_valid_since_time (SeafRepo *repo, gint64 new_time)\n{\n    gint64 old_time = seaf_repo_manager_get_repo_valid_since (repo->manager,\n                                                              repo->id);\n\n    if (new_time > 0) {\n        if (new_time > old_time)\n            seaf_repo_manager_set_repo_valid_since (repo->manager,\n                                                    repo->id,\n                                                    new_time);\n    } else if (new_time == 0) {\n        /* Only the head commit is valid after GC if no history is kept. 
*/\n        SeafCommit *head = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                                                           repo->id, repo->version,\n                                                           repo->head->commit_id);\n        if (head && (old_time < 0 || head->ctime > (guint64)old_time))\n            seaf_repo_manager_set_repo_valid_since (repo->manager,\n                                                    repo->id,\n                                                    head->ctime);\n        seaf_commit_unref (head);\n    }\n}\n\nstatic GCData *\ngc_data_new (SeafRepo *repo, Bloom *blocks_index, Bloom *fs_index, int verbose)\n{\n    GCData *data;\n    data = g_new0(GCData, 1);\n    seaf_repo_ref(repo);\n    data->repo = repo;\n    data->blocks_index = blocks_index;\n    data->fs_index = fs_index;\n    data->visited = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL);\n    data->visited_commits = g_hash_table_new_full (g_str_hash, g_str_equal,\n                                                   g_free, NULL);\n    data->verbose = verbose;\n\n    gint64 truncate_time;\n    truncate_time = seaf_repo_manager_get_repo_truncate_time (repo->manager,\n                                                              repo->id);\n    update_valid_since_time (repo, truncate_time);\n    data->truncate_time = truncate_time;\n\n    data->keep_alive_last_time = (gint64)time(NULL);\n    data->keep_alive_obj_counter = 0;\n\n    return data;\n}\n\nstatic void\ngc_data_free (GCData *data)\n{\n    if (!data)\n        return;\n\n    seaf_repo_unref(data->repo);\n    g_hash_table_destroy (data->visited);\n    g_hash_table_destroy (data->visited_commits);\n    g_free (data);\n\n    return;\n}\n\nstatic gint64\npopulate_gc_index_for_repo_for_new_commits (GCData *data, SeafDBTrans *trans)\n{\n    SeafBranch *new_branch = NULL;\n    gint64 n_blocks_last = 0;\n    int n_commits_last = 0;\n    gboolean res;\n    gint64 ret = 0;\n    SeafRepo *repo = 
data->repo;\n\n    if (!repo->is_virtual) {\n        if (trans != NULL && update_gc_id (repo, trans) < 0) {\n            seaf_warning (\"Failed to update GCID for repo %s.\\n\", repo->id);\n            ret = -1;\n            goto out;\n        }\n    }\n\n    n_blocks_last = data->traversed_blocks;\n    n_commits_last = data->traversed_commits;\n    data->traversed_blocks = 0;\n    data->traversed_commits = 0;\n    data->trans = trans;\n\n    new_branch = seaf_branch_manager_get_branch (seaf->branch_mgr, repo->id, \"master\");\n    if (!new_branch) {\n        seaf_warning (\"Failed to get master branch of repo %.8s.\\n\", repo->id);\n        ret = -1;\n        goto out;\n    }\n\n    if (g_strcmp0 (repo->head->commit_id, new_branch->commit_id) != 0) {\n        res = seaf_commit_manager_traverse_commit_tree (seaf->commit_mgr,\n                                                        repo->id, repo->version,\n                                                        new_branch->commit_id,\n                                                        traverse_commit,\n                                                        data,\n                                                        FALSE);\n        if (!res) {\n            ret = -1;\n            seaf_warning (\"Failed to populate index for repo %.8s.\\n\", repo->id);\n            goto out;\n        }\n    }\n\n    seaf_message (\"Traversed %d commits, %\"G_GINT64_FORMAT\" blocks for repo %.8s.\\n\",\n                  data->traversed_commits + n_commits_last,\n                  data->traversed_blocks + n_blocks_last,\n                  repo->id);\n\n    ret = data->traversed_blocks;\n\nout:\n    seaf_branch_unref (new_branch);\n\n    return ret;\n\n}\n\nstatic gint64\npopulate_gc_index_for_repo (GCData *data, SeafDBTrans *trans)\n{\n    gboolean res;\n    gint64 ret = 0;\n    SeafRepo *repo = data->repo;\n\n    data->trans = trans;\n\n    if (!repo->is_virtual)\n        seaf_message (\"Populating index for repo %.8s.\\n\", 
repo->id);\n    else\n        seaf_message (\"Populating index for sub-repo %.8s.\\n\", repo->id);\n\n    res = seaf_commit_manager_traverse_commit_tree (seaf->commit_mgr,\n                                                    repo->id, repo->version,\n                                                    repo->head->commit_id,\n                                                    traverse_commit,\n                                                    data,\n                                                    FALSE);\n    if (!res) {\n        ret = -1;\n        seaf_warning (\"Failed to populate index for repo %.8s.\\n\", repo->id);\n        return -1;\n    }\n\n    // Traverse the base commit of the virtual repo. Otherwise, if the virtual repo has not been updated for a long time,\n    // the fs object corresponding to the base commit will be removed by mistake.\n    if (!repo->is_virtual) {\n        GList *vrepo_ids = NULL, *ptr;\n        char *repo_id = NULL;\n        SeafVirtRepo *vinfo = NULL;\n        vrepo_ids = seaf_repo_manager_get_virtual_repo_ids_by_origin (seaf->repo_mgr,\n                                                                      repo->id);\n        for (ptr = vrepo_ids; ptr; ptr = ptr->next) {\n            repo_id = ptr->data;\n            vinfo = seaf_repo_manager_get_virtual_repo_info (seaf->repo_mgr, repo_id);\n            if (!vinfo) {\n                continue;\n            }\n            data->traverse_base_commit = TRUE;\n            res = seaf_commit_manager_traverse_commit_tree (seaf->commit_mgr,\n                                                            repo->store_id, repo->version,\n                                                            vinfo->base_commit,\n                                                            traverse_commit,\n                                                            data,\n                                                            FALSE);\n            data->traverse_base_commit = FALSE;\n            
if (!res) {\n                seaf_warning (\"Failed to traverse base commit %s for virtual repo %s.\\n\", vinfo->base_commit, repo_id);\n                seaf_virtual_repo_info_free (vinfo);\n                string_list_free (vrepo_ids);\n                return -1;\n            }\n            seaf_virtual_repo_info_free (vinfo);\n        }\n        string_list_free (vrepo_ids);\n    }\n\n    ret = data->traversed_blocks;\n\n    return ret;\n}\n\n#define MAX_THREADS 10\n\ntypedef struct CheckBlockParam {\n    char *store_id;\n    int repo_version;\n    Bloom *index;\n    int dry_run;\n    GAsyncQueue *async_queue;\n    pthread_mutex_t counter_lock;\n    gint64 removed_blocks;\n} CheckBlockParam;\n\ntypedef struct CheckFSParam {\n    char *store_id;\n    int repo_version;\n    Bloom *index;\n    int dry_run;\n    GAsyncQueue *async_queue;\n    pthread_mutex_t counter_lock;\n    gint64 removed_fs;\n} CheckFSParam;\n\nstatic void\ncheck_block_liveness (gpointer data, gpointer user_data)\n{\n    char *block_id = data;\n    CheckBlockParam *param = user_data;\n\n    if (!bloom_test (param->index, block_id)) {\n        pthread_mutex_lock (&param->counter_lock);\n        param->removed_blocks ++;\n        pthread_mutex_unlock (&param->counter_lock);\n        if (!param->dry_run)\n            seaf_block_manager_remove_block (seaf->block_mgr,\n                                             param->store_id, param->repo_version,\n                                             block_id);\n    }\n\n    g_async_queue_push (param->async_queue, block_id);\n}\n\nstatic gint64\ncheck_existing_blocks (char *store_id, int repo_version, GHashTable *exist_blocks,\n                       Bloom *blocks_index, int dry_run)\n{\n    char *block_id;\n    GThreadPool *tpool = NULL;\n    GAsyncQueue *async_queue = NULL;\n    CheckBlockParam *param = NULL;\n    GHashTableIter iter;\n    gpointer key, value;\n    gint64 ret = 0;\n\n    async_queue = g_async_queue_new ();\n    param = g_new0 (CheckBlockParam, 1);\n    param->store_id = store_id;\n    
param->repo_version = repo_version;\n    param->index = blocks_index;\n    param->dry_run = dry_run;\n    param->async_queue = async_queue;\n    pthread_mutex_init (&param->counter_lock, NULL);\n\n    tpool = g_thread_pool_new (check_block_liveness, param, MAX_THREADS, FALSE, NULL);\n    if (!tpool) {\n        seaf_warning (\"Failed to create thread pool for repo %s, stop gc.\\n\",\n                      store_id);\n        ret = -1;\n        goto out;\n    }\n\n    g_hash_table_iter_init (&iter, exist_blocks);\n\n    while (g_hash_table_iter_next (&iter, &key, &value)) {\n        g_thread_pool_push (tpool, (char *)key, NULL);\n    }\n\n    while ((block_id = g_async_queue_pop (async_queue))) {\n        g_hash_table_remove (exist_blocks, block_id);\n        if (g_hash_table_size (exist_blocks) == 0) {\n            break;\n        }\n    }\n\n    ret = param->removed_blocks;\n\nout:\n    g_thread_pool_free (tpool, TRUE, TRUE);\n    g_async_queue_unref (async_queue);\n    g_free (param);\n\n    return ret;\n}\n\nstatic gboolean\ncollect_exist_blocks (const char *store_id, int version,\n                      const char *block_id, void *vdata)\n{\n    GHashTable *exist_blocks = vdata;\n    int dummy;\n\n    g_hash_table_replace (exist_blocks, g_strdup (block_id), &dummy);\n\n    return TRUE;\n}\n\nstatic void\ncheck_fs_liveness (gpointer data, gpointer user_data)\n{\n    char *fs_id = data;\n    CheckFSParam *param = user_data;\n\n    if (!bloom_test (param->index, fs_id)) {\n        pthread_mutex_lock (&param->counter_lock);\n        param->removed_fs ++;\n        pthread_mutex_unlock (&param->counter_lock);\n        if (!param->dry_run)\n            seaf_fs_manager_delete_object(seaf->fs_mgr,\n                                          param->store_id, param->repo_version,\n                                          fs_id);\n    }\n\n    g_async_queue_push (param->async_queue, fs_id);\n}\n\nstatic gint64\ncheck_existing_fs (char *store_id, int repo_version, GHashTable 
*exist_fs,\n                   Bloom *fs_index, int dry_run)\n{\n    char *fs_id;\n    GThreadPool *tpool = NULL;\n    GAsyncQueue *async_queue = NULL;\n    CheckFSParam *param = NULL;\n    GHashTableIter iter;\n    gpointer key, value;\n    gint64 ret = 0;\n\n    async_queue = g_async_queue_new ();\n    param = g_new0 (CheckFSParam, 1);\n    param->store_id = store_id;\n    param->repo_version = repo_version;\n    param->index = fs_index;\n    param->dry_run = dry_run;\n    param->async_queue = async_queue;\n    pthread_mutex_init (&param->counter_lock, NULL);\n\n    tpool = g_thread_pool_new (check_fs_liveness, param, MAX_THREADS, FALSE, NULL);\n    if (!tpool) {\n        seaf_warning (\"Failed to create thread pool for repo %s, stop gc.\\n\",\n                      store_id);\n        ret = -1;\n        goto out;\n    }\n\n    g_hash_table_iter_init (&iter, exist_fs);\n\n    while (g_hash_table_iter_next (&iter, &key, &value)) {\n        g_thread_pool_push (tpool, (char *)key, NULL);\n    }\n\n    while ((fs_id = g_async_queue_pop (async_queue))) {\n        g_hash_table_remove (exist_fs, fs_id);\n        if (g_hash_table_size (exist_fs) == 0) {\n            break;\n        }\n    }\n\n    ret = param->removed_fs;\n\nout:\n    g_thread_pool_free (tpool, TRUE, TRUE);\n    g_async_queue_unref (async_queue);\n    g_free (param);\n\n    return ret;\n}\n\nstatic gboolean\ncollect_exist_fs (const char *store_id, int version,\n                   const char *fs_id, void *vdata)\n{\n    GHashTable *exist_fs = vdata;\n    int dummy;\n\n    g_hash_table_replace (exist_fs, g_strdup (fs_id), &dummy);\n\n    return TRUE;\n}\n\nstatic gint64\npopulate_gc_index_for_virtual_repos_for_new_commits (GList *virtual_repos,\n                                                     SeafDBTrans *trans)\n{\n    GList *ptr;\n    SeafRepo *vrepo;\n    gint64 scan_ret = 0;\n    gint64 ret = 0;\n    GCData *data = NULL;\n\n    for (ptr = virtual_repos; ptr; ptr = ptr->next) {\n        data = 
ptr->data;\n        if (!data)\n            continue;\n\n        vrepo = data->repo;\n        if (!vrepo) {\n            continue;\n        }\n\n        scan_ret = populate_gc_index_for_repo_for_new_commits (data, trans);\n        if (scan_ret < 0) {\n            ret = -1;\n            goto out;\n        }\n        ret += scan_ret;\n    }\n\nout:\n    return ret;\n}\n\nstatic gint64\npopulate_gc_index_for_virtual_repos (SeafRepo *repo,\n                                     GList **virtual_repos,\n                                     Bloom *blocks_index,\n                                     Bloom *fs_index,\n                                     SeafDBTrans *trans,\n                                     int verbose)\n{\n    GList *vrepo_ids = NULL, *ptr;\n    char *repo_id;\n    SeafRepo *vrepo;\n    gint64 scan_ret = 0;\n    gint64 ret = 0;\n    GCData *data;\n\n    vrepo_ids = seaf_repo_manager_get_virtual_repo_ids_by_origin (seaf->repo_mgr,\n                                                                  repo->id);\n    for (ptr = vrepo_ids; ptr; ptr = ptr->next) {\n        repo_id = ptr->data;\n        vrepo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);\n        if (!vrepo) {\n            seaf_warning (\"Failed to get repo %s.\\n\", repo_id);\n            ret = -1;\n            goto out;\n        }\n\n        data = gc_data_new (vrepo, blocks_index, fs_index, verbose);\n        *virtual_repos = g_list_prepend (*virtual_repos, data);\n\n        scan_ret = populate_gc_index_for_repo (data, trans);\n        seaf_repo_unref(vrepo);\n        if (scan_ret < 0) {\n            ret = -1;\n            goto out;\n        }\n        ret += scan_ret;\n    }\n\nout:\n    string_list_free (vrepo_ids);\n    return ret;\n}\n\n/*\n * @keep_days: explicitly sepecify how many days of history to keep after GC.\n *             This has higher priority than the history limit set in database.\n * @online: is running online GC. 
Online GC is not supported for SQLite DB.\n */\ngint64\ngc_v1_repo (SeafRepo *repo, int dry_run, int online, int verbose, int rm_fs)\n{\n    Bloom *blocks_index = NULL;\n    Bloom *fs_index = NULL;\n    GHashTable *exist_blocks = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL);\n    GHashTable *exist_fs = NULL;\n    GList *virtual_repos = NULL;\n    guint64 total_blocks = 0;\n    guint64 total_fs = 0;\n    guint64 reachable_blocks = 0;\n    gint64 removed_fs = 0;\n    gint64 ret;\n    GCData *data;\n    SeafDBTrans *trans = NULL;\n\n    ret = seaf_block_manager_foreach_block (seaf->block_mgr,\n                                            repo->store_id, repo->version,\n                                            collect_exist_blocks,\n                                            exist_blocks);\n    if (ret < 0) {\n        seaf_warning (\"Failed to collect existing blocks for repo %.8s, stop GC.\\n\\n\",\n                      repo->id);\n        g_hash_table_destroy (exist_blocks);\n        return ret;\n    }\n\n    total_blocks = g_hash_table_size (exist_blocks);\n    if (total_blocks == 0) {\n        seaf_message (\"No blocks for repo %.8s, skip GC.\\n\\n\", repo->id);\n        g_hash_table_destroy (exist_blocks);\n        return 0;\n    }\n\n     if (rm_fs) {\n        exist_fs = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL);\n        ret = seaf_obj_store_foreach_obj (seaf->fs_mgr->obj_store,\n                                          repo->store_id, repo->version,\n                                          collect_exist_fs,\n                                          exist_fs);\n        if (ret < 0) {\n            seaf_warning (\"Failed to collect existing fs for repo %.8s, stop GC.\\n\\n\",\n                        repo->id);\n            goto out;\n        }\n\n        total_fs = g_hash_table_size (exist_fs);\n    }\n\n    if (rm_fs)\n        seaf_message (\"GC started for repo %.8s. 
Total block number is %\"G_GUINT64_FORMAT\", total fs number is %\"G_GUINT64_FORMAT\".\\n\",\n                      repo->id, total_blocks, total_fs);\n    else\n        seaf_message (\"GC started for repo %.8s. Total block number is %\"G_GUINT64_FORMAT\".\\n\",\n                      repo->id, total_blocks);\n\n    /*\n     * Store the index of live blocks in bloom filter to save memory.\n     * Since bloom filters only have false-positive, we\n     * may skip some garbage blocks, but we won't delete\n     * blocks that are still alive.\n     */\n    blocks_index = alloc_gc_index (repo->id, total_blocks);\n    if (!blocks_index) {\n        seaf_warning (\"GC: Failed to allocate blocks index for repo %.8s, stop gc.\\n\",\n                      repo->id);\n        ret = -1;\n        goto out;\n    }\n\n    if (rm_fs && total_fs > 0) {\n        fs_index = alloc_gc_index (repo->id, total_fs);\n        if (!fs_index) {\n            seaf_warning (\"GC: Failed to allocate fs index for repo %.8s, stop gc.\\n\",\n                        repo->id);\n            ret = -1;\n            goto out;\n        }\n    }\n\n    data = gc_data_new (repo, blocks_index, fs_index, verbose);\n    ret = populate_gc_index_for_repo (data, trans);\n    if (ret < 0) {\n        goto out;\n    }\n\n    reachable_blocks += ret;\n\n    /* Since virtual repos share fs and block store with the origin repo,\n     * it's necessary to do GC for them together.\n     */\n    ret = populate_gc_index_for_virtual_repos (repo, &virtual_repos,\n                                               blocks_index, fs_index, trans, verbose);\n    if (ret < 0) {\n        goto out;\n    }\n\n    reachable_blocks += ret;\n\n    if (online) {\n        trans = seaf_db_begin_transaction (seaf->db);\n        if (!trans)\n            goto out;\n    }\n\n    ret = populate_gc_index_for_repo_for_new_commits (data, trans);\n    if (ret < 0) {\n        if (online) {\n            seaf_db_rollback (trans);\n            
seaf_db_trans_close (trans);\n        }\n        goto out;\n    }\n\n    reachable_blocks += ret;\n\n\n    ret = populate_gc_index_for_virtual_repos_for_new_commits (virtual_repos, trans);\n\n    if (ret < 0) {\n        if (online) {\n            seaf_db_rollback (trans);\n            seaf_db_trans_close (trans);\n        }\n        goto out;\n    }\n\n    reachable_blocks += ret;\n\n    if (!dry_run)\n        seaf_message (\"Scanning and deleting unused blocks for repo %.8s.\\n\",\n                      repo->id);\n    else\n        seaf_message (\"Scanning unused blocks for repo %.8s.\\n\", repo->id);\n\n    ret = check_existing_blocks (repo->store_id, repo->version, exist_blocks,\n                                 blocks_index, dry_run);\n    if (ret < 0) {\n        if (online) {\n            seaf_db_rollback (trans);\n            seaf_db_trans_close (trans);\n        }\n        goto out;\n    }\n\n    if (rm_fs && total_fs > 0) {\n        removed_fs = check_existing_fs(repo->store_id, repo->version, exist_fs,\n                                       fs_index, dry_run);\n        if (removed_fs < 0) {\n            if (online) {\n                seaf_db_rollback (trans);\n                seaf_db_trans_close (trans);\n            }\n            goto out;\n        }\n    }\n\n    if (!dry_run) {\n        if (rm_fs)\n            seaf_message (\"GC finished for repo %.8s. %\"G_GUINT64_FORMAT\" blocks total, \"\n                          \"about %\"G_GUINT64_FORMAT\" reachable blocks, \"\n                          \"%\"G_GUINT64_FORMAT\" blocks are removed. \"\n                          \"%\"G_GUINT64_FORMAT\" fs are removed.\\n\",\n                          repo->id, total_blocks, reachable_blocks, ret, removed_fs);\n        else\n            seaf_message (\"GC finished for repo %.8s. 
%\"G_GUINT64_FORMAT\" blocks total, \"\n                          \"about %\"G_GUINT64_FORMAT\" reachable blocks, \"\n                          \"%\"G_GUINT64_FORMAT\" blocks are removed.\\n\",\n                          repo->id, total_blocks, reachable_blocks, ret);\n    } else {\n        if (rm_fs)\n            seaf_message (\"GC finished for repo %.8s. %\"G_GUINT64_FORMAT\" blocks total, \"\n                          \"about %\"G_GUINT64_FORMAT\" reachable blocks, \"\n                          \"%\"G_GUINT64_FORMAT\" blocks can be removed. \"\n                          \"%\"G_GUINT64_FORMAT\" fs can be removed.\\n\",\n                          repo->id, total_blocks, reachable_blocks, ret, removed_fs);\n        else\n            seaf_message (\"GC finished for repo %.8s. %\"G_GUINT64_FORMAT\" blocks total, \"\n                          \"about %\"G_GUINT64_FORMAT\" reachable blocks, \"\n                          \"%\"G_GUINT64_FORMAT\" blocks can be removed.\\n\",\n                          repo->id, total_blocks, reachable_blocks, ret);\n    }\n\n    if (online) {\n        if (seaf_db_commit (trans) < 0) {\n            seaf_db_rollback (trans);\n        }\n        seaf_db_trans_close (trans);\n    }\n\nout:\n    printf (\"\\n\");\n\n    if (blocks_index)\n        bloom_destroy (blocks_index);\n    if (fs_index)\n        bloom_destroy(fs_index);\n    g_hash_table_destroy (exist_blocks);\n    if (exist_fs)\n        g_hash_table_destroy (exist_fs);\n    gc_data_free (data);\n    g_list_free_full(virtual_repos, (GDestroyNotify)gc_data_free);\n    return ret;\n}\n\ntypedef enum RemoveType {\n    COMMIT,\n    FS,\n    BLOCK\n} RemoveType;\n\ntypedef struct RemoveTask {\n    const char *repo_id;\n    RemoveType remove_type;\n    gboolean success;\n} RemoveTask;\n\nstatic void\nremove_store (gpointer data, gpointer user_data)\n{\n    RemoveTask *task = data;\n    GAsyncQueue *async_queue = user_data;\n    int ret = 0;\n\n    switch (task->remove_type) {\n        case 
COMMIT:\n            seaf_message (\"Deleting commits for repo %s.\\n\", task->repo_id);\n            ret = seaf_commit_manager_remove_store (seaf->commit_mgr, task->repo_id);\n            if (ret == 0) {\n                task->success = TRUE;\n            }\n            break;\n        case FS:\n            seaf_message (\"Deleting fs objects for repo %s.\\n\", task->repo_id);\n            ret = seaf_fs_manager_remove_store (seaf->fs_mgr, task->repo_id);\n            if (ret == 0) {\n                task->success = TRUE;\n            }\n            break;\n        case BLOCK:\n            seaf_message (\"Deleting blocks for repo %s.\\n\", task->repo_id);\n            ret = seaf_block_manager_remove_store (seaf->block_mgr, task->repo_id);\n            if (ret == 0) {\n                task->success = TRUE;\n            }\n            break;\n        default:\n            break;\n    }\n\n    g_async_queue_push (async_queue, task);\n}\n\nvoid\ndelete_garbaged_repos (int dry_run, int thread_num)\n{\n    GList *del_repos = NULL;\n    GList *ptr;\n    GAsyncQueue *async_queue = NULL;\n    int tnum;\n    GThreadPool *tpool = NULL;\n    RemoveTask *task = NULL;\n    int n_tasks = 0;\n    char *repo_id;\n    char *dup_id;\n    GHashTableIter iter;\n    gpointer key, value;\n    GHashTable *deleted;\n\n    deleted = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL);\n\n    seaf_message (\"=== Repos deleted by users ===\\n\");\n    del_repos = seaf_repo_manager_list_garbage_repos (seaf->repo_mgr);\n\n    if (!dry_run && del_repos) {\n        async_queue = g_async_queue_new ();\n        if (!async_queue) {\n            seaf_warning (\"Failed to create async queue.\\n\");\n            goto out;\n        }\n\n        tnum = thread_num <= 0 ? 
MAX_THREADS : thread_num;\n        tpool = g_thread_pool_new (remove_store, async_queue, tnum, FALSE, NULL);\n        if (!tpool) {\n            seaf_warning (\"Failed to create thread pool.\\n\");\n            goto out;\n        }\n    }\n\n    for (ptr = del_repos; ptr; ptr = ptr->next) {\n        repo_id = ptr->data;\n        if (!is_uuid_valid(repo_id)) {\n            continue;\n        }\n\n        /* Confirm repo doesn't exist before removing blocks. */\n        if (!seaf_repo_manager_repo_exists (seaf->repo_mgr, repo_id)) {\n            if (!dry_run) {\n                seaf_message (\"Start to GC deleted repo %s.\\n\", repo_id);\n                // Remove commit\n                task = g_new0 (RemoveTask, 1);\n                task->repo_id = repo_id;\n                task->remove_type = COMMIT;\n                g_thread_pool_push (tpool, task, NULL);\n\n                // Remove fs\n                task = g_new0 (RemoveTask, 1);\n                task->repo_id = repo_id;\n                task->remove_type = FS;\n                g_thread_pool_push (tpool, task, NULL);\n\n                // Remove block\n                task = g_new0 (RemoveTask, 1);\n                task->repo_id = repo_id;\n                task->remove_type = BLOCK;\n                g_thread_pool_push (tpool, task, NULL);\n\n                n_tasks += 3;\n\n                dup_id = g_strdup (repo_id);\n                g_hash_table_insert (deleted, dup_id, dup_id);\n            } else {\n                seaf_message (\"Repo %s can be GC'ed.\\n\", repo_id);\n            }\n        }\n    }\n\n    while (n_tasks > 0 && (task = g_async_queue_pop (async_queue))) {\n        n_tasks--;\n        if (!task->success) {\n            if (g_hash_table_lookup (deleted, task->repo_id)) {\n                g_hash_table_remove(deleted, task->repo_id);\n            }\n        }\n        g_free (task);\n    }\n\n    if (!dry_run) {\n        g_hash_table_iter_init (&iter, deleted);\n        while 
(g_hash_table_iter_next (&iter, &key, &value)) {\n            seaf_repo_manager_remove_garbage_repo (seaf->repo_mgr, (char *)key);\n        }\n    }\n\nout:\n    g_hash_table_destroy (deleted);\n    if (tpool)\n        g_thread_pool_free (tpool, TRUE, TRUE);\n    if (async_queue)\n        g_async_queue_unref (async_queue);\n    string_list_free (del_repos);\n}\n\ntypedef struct GCRepoParam {\n    int dry_run;\n    int verbose;\n    int rm_fs;\n    gboolean online;\n    GAsyncQueue *async_queue;\n} GCRepoParam;\n\ntypedef struct GCRepo {\n    SeafRepo *repo;\n    gint64 gc_ret;\n} GCRepo;\n\nstatic void\nfree_gc_repo (GCRepo *gc_repo)\n{\n    if (!gc_repo)\n        return;\n\n    seaf_repo_unref (gc_repo->repo);\n    g_free (gc_repo);\n}\n\nstatic void\ngc_repo_cb (gpointer data, gpointer user_data)\n{\n    GCRepo *gc_repo = data;\n    GCRepoParam *param = user_data;\n    SeafRepo *repo = gc_repo->repo;\n\n    seaf_message (\"GC version %d repo %s(%s)\\n\",\n                  repo->version, repo->name, repo->id);\n\n    gc_repo->gc_ret = gc_v1_repo (repo, param->dry_run,\n                                  param->online, param->verbose, param->rm_fs);\n\n    g_async_queue_push (param->async_queue, gc_repo);\n}\n\nint\ngc_core_run (GList *repo_id_list, const char *id_prefix,\n             int dry_run, int verbose, int thread_num, int rm_fs)\n{\n    GList *ptr;\n    SeafRepo *repo;\n    GList *corrupt_repos = NULL;\n    GList *del_block_repos = NULL;\n    gboolean del_garbage = FALSE;\n    GAsyncQueue *async_queue = NULL;\n    GCRepoParam *param = NULL;\n    int tnum;\n    GThreadPool *tpool = NULL;\n    int gc_repo_num = 0;\n    GCRepo *gc_repo = NULL;\n    char *repo_id;\n    gboolean online;\n\n    if (seaf_db_type (seaf->db) == SEAF_DB_TYPE_SQLITE) {\n        online = FALSE;\n        seaf_message (\"Database is SQLite, use offline GC.\\n\");\n    } else {\n        online = TRUE;\n        seaf_message (\"Database is MySQL/Postgre/Oracle, use online GC.\\n\");\n    
}\n\n    async_queue = g_async_queue_new ();\n    if (!async_queue) {\n        seaf_warning (\"Failed to create async queue, stop gc.\\n\");\n        return -1;\n    }\n\n    param = g_new0 (GCRepoParam, 1);\n    param->dry_run = dry_run;\n    param->verbose = verbose;\n    param->rm_fs = rm_fs;\n    param->online = online;\n    param->async_queue = async_queue;\n\n    tnum = thread_num <= 0 ? MAX_THREADS : thread_num;\n    tpool = g_thread_pool_new (gc_repo_cb, param, tnum, FALSE, NULL);\n    if (!tpool) {\n        seaf_warning (\"Failed to create thread pool, stop gc.\\n\");\n        g_async_queue_unref (async_queue);\n        g_free (param);\n        return -1;\n    }\n\n    seaf_message (\"Using up to %d threads to run GC.\\n\", tnum);\n\n    if (id_prefix) {\n        if (repo_id_list)\n            g_list_free (repo_id_list);\n        repo_id_list = seaf_repo_manager_get_repo_id_list_by_prefix (seaf->repo_mgr, id_prefix);\n        del_garbage = TRUE;\n    } else if (repo_id_list == NULL) {\n        repo_id_list = seaf_repo_manager_get_repo_id_list (seaf->repo_mgr);\n        del_garbage = TRUE;\n    }\n\n    for (ptr = repo_id_list; ptr; ptr = ptr->next) {\n        repo = seaf_repo_manager_get_repo_ex (seaf->repo_mgr, (const gchar *)ptr->data);\n\n        g_free (ptr->data);\n\n        if (!repo)\n            continue;\n\n        if (repo->is_corrupted) {\n            corrupt_repos = g_list_prepend (corrupt_repos, g_strdup(repo->id));\n            seaf_message (\"Repo %s is damaged, skip GC.\\n\\n\", repo->id);\n            seaf_repo_unref (repo);\n            continue;\n        }\n\n        if (!repo->is_virtual) {\n            gc_repo = g_new0 (GCRepo, 1);\n            gc_repo->repo = repo;\n            g_thread_pool_push (tpool, gc_repo, NULL);\n            gc_repo_num++;\n        } else {\n            seaf_repo_unref (repo);\n        }\n    }\n    g_list_free (repo_id_list);\n\n    while (gc_repo_num > 0 && (gc_repo = g_async_queue_pop (async_queue))) {\n    
    if (gc_repo->gc_ret < 0) {\n            corrupt_repos = g_list_prepend (corrupt_repos, g_strdup(gc_repo->repo->id));\n        } else if (dry_run && gc_repo->gc_ret) {\n            del_block_repos = g_list_prepend (del_block_repos, g_strdup(gc_repo->repo->id));\n        }\n        free_gc_repo (gc_repo);\n        gc_repo_num--;\n    }\n\n    if (del_garbage) {\n        delete_garbaged_repos (dry_run, tnum);\n    }\n\n    seaf_message (\"=== GC is finished ===\\n\");\n\n    if (corrupt_repos) {\n        seaf_message (\"The following repos are damaged. \"\n                      \"You can run seaf-fsck to fix them.\\n\");\n        for (ptr = corrupt_repos; ptr; ptr = ptr->next) {\n            repo_id = ptr->data;\n            seaf_message (\"%s\\n\", repo_id);\n            g_free (repo_id);\n        }\n        g_list_free (corrupt_repos);\n    }\n\n    if (del_block_repos) {\n        printf(\"\\n\");\n        seaf_message (\"The following repos have blocks to be removed:\\n\");\n        for (ptr = del_block_repos; ptr; ptr = ptr->next) {\n            repo_id = ptr->data;\n            seaf_message (\"%s\\n\", repo_id);\n            g_free (repo_id);\n        }\n        g_list_free (del_block_repos);\n    }\n\n    g_thread_pool_free (tpool, TRUE, TRUE);\n    g_async_queue_unref (async_queue);\n    g_free (param);\n\n    return 0;\n}\n"
  },
  {
    "path": "server/gc/gc-core.h",
    "content": "#ifndef GC_CORE_H\n#define GC_CORE_H\n\nint gc_core_run (GList *repo_id_list, const char *id_prefix,\n                 int dry_run, int verbose, int thread_num, int rm_fs);\n\nvoid\ndelete_garbaged_repos (int dry_run, int thread_num);\n\n#endif\n"
  },
  {
    "path": "server/gc/repo-mgr.c",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#include \"common.h\"\n#include <glib/gstdio.h>\n\n#include \"utils.h\"\n#include \"log.h\"\n\n#include \"seafile-session.h\"\n#include \"commit-mgr.h\"\n#include \"branch-mgr.h\"\n#include \"repo-mgr.h\"\n#include \"fs-mgr.h\"\n#include \"seafile-error.h\"\n\n#include \"seaf-db.h\"\n\n#define INDEX_DIR \"index\"\n\nstruct _SeafRepoManagerPriv {\n\n};\n\nstatic SeafRepo *\nload_repo (SeafRepoManager *manager, const char *repo_id, gboolean ret_corrupt);\n\ngboolean\nis_repo_id_valid (const char *id)\n{\n    if (!id)\n        return FALSE;\n\n    return is_uuid_valid (id);\n}\n\nSeafRepo*\nseaf_repo_new (const char *id, const char *name, const char *desc)\n{\n    SeafRepo* repo;\n\n    /* valid check */\n  \n    \n    repo = g_new0 (SeafRepo, 1);\n    memcpy (repo->id, id, 36);\n    repo->id[36] = '\\0';\n\n    repo->name = g_strdup(name);\n    repo->desc = g_strdup(desc);\n\n    repo->ref_cnt = 1;\n\n    return repo;\n}\n\nvoid\nseaf_repo_free (SeafRepo *repo)\n{\n    if (repo->name) g_free (repo->name);\n    if (repo->desc) g_free (repo->desc);\n    if (repo->category) g_free (repo->category);\n    if (repo->head) seaf_branch_unref (repo->head);\n    g_free (repo->pwd_hash_algo);\n    g_free (repo->pwd_hash_params);\n    g_free (repo);\n}\n\nvoid\nseaf_repo_ref (SeafRepo *repo)\n{\n    g_atomic_int_inc (&repo->ref_cnt);\n}\n\nvoid\nseaf_repo_unref (SeafRepo *repo)\n{\n    if (!repo)\n        return;\n\n    if (g_atomic_int_dec_and_test (&repo->ref_cnt))\n        seaf_repo_free (repo);\n}\n\nstatic void\nset_head_common (SeafRepo *repo, SeafBranch *branch)\n{\n    if (repo->head)\n        seaf_branch_unref (repo->head);\n    repo->head = branch;\n    seaf_branch_ref(branch);\n}\n\nvoid\nseaf_repo_from_commit (SeafRepo *repo, SeafCommit *commit)\n{\n    repo->name = g_strdup (commit->repo_name);\n    repo->desc = g_strdup (commit->repo_desc);\n    repo->encrypted = 
commit->encrypted;\n    repo->repaired = commit->repaired;\n    if (repo->encrypted) {\n        repo->enc_version = commit->enc_version;\n        if (repo->enc_version == 1 && !commit->pwd_hash_algo)\n            memcpy (repo->magic, commit->magic, 32);\n        else if (repo->enc_version == 2) {\n            memcpy (repo->random_key, commit->random_key, 96);\n        } else if (repo->enc_version == 3) {\n            memcpy (repo->random_key, commit->random_key, 96);\n            memcpy (repo->salt, commit->salt, 64);\n        } else if (repo->enc_version == 4) {\n            memcpy (repo->random_key, commit->random_key, 96);\n            memcpy (repo->salt, commit->salt, 64);\n        }\n        if (repo->enc_version >= 2 && !commit->pwd_hash_algo) {\n            memcpy (repo->magic, commit->magic, 64);\n        }\n        if (commit->pwd_hash_algo) {\n            memcpy (repo->pwd_hash, commit->pwd_hash, 64);\n            repo->pwd_hash_algo = g_strdup (commit->pwd_hash_algo);\n            repo->pwd_hash_params = g_strdup (commit->pwd_hash_params);\n        }\n    }\n    repo->no_local_history = commit->no_local_history;\n    repo->version = commit->version;\n}\n\nvoid\nseaf_repo_to_commit (SeafRepo *repo, SeafCommit *commit)\n{\n    commit->repo_name = g_strdup (repo->name);\n    commit->repo_desc = g_strdup (repo->desc);\n    commit->encrypted = repo->encrypted;\n    commit->repaired = repo->repaired;\n    if (commit->encrypted) {\n        commit->enc_version = repo->enc_version;\n        if (commit->enc_version == 1 && !repo->pwd_hash_algo)\n            commit->magic = g_strdup (repo->magic);\n        else if (commit->enc_version == 2) {\n            commit->random_key = g_strdup (repo->random_key);\n        } else if (commit->enc_version == 3) {\n            commit->random_key = g_strdup (repo->random_key);\n            commit->salt = g_strdup (repo->salt);\n        } else if (commit->enc_version == 4) {\n            commit->random_key = g_strdup 
(repo->random_key);\n            commit->salt = g_strdup (repo->salt);\n        }\n        if (commit->enc_version >= 2 && !repo->pwd_hash_algo) {\n            commit->magic = g_strdup (repo->magic);\n        }\n        if (repo->pwd_hash_algo) {\n            commit->pwd_hash = g_strdup (repo->pwd_hash);\n            commit->pwd_hash_algo = g_strdup (repo->pwd_hash_algo);\n            commit->pwd_hash_params = g_strdup (repo->pwd_hash_params);\n        }\n    }\n    commit->no_local_history = repo->no_local_history;\n    commit->version = repo->version;\n}\n\nstatic gboolean\ncollect_commit (SeafCommit *commit, void *vlist, gboolean *stop)\n{\n    GList **commits = vlist;\n\n    /* The traverse function will unref the commit, so we need to ref it.\n     */\n    seaf_commit_ref (commit);\n    *commits = g_list_prepend (*commits, commit);\n    return TRUE;\n}\n\nGList *\nseaf_repo_get_commits (SeafRepo *repo)\n{\n    GList *branches;\n    GList *ptr;\n    SeafBranch *branch;\n    GList *commits = NULL;\n\n    branches = seaf_branch_manager_get_branch_list (seaf->branch_mgr, repo->id);\n    if (branches == NULL) {\n        seaf_warning (\"Failed to get branch list of repo %s.\\n\", repo->id);\n        return NULL;\n    }\n\n    for (ptr = branches; ptr != NULL; ptr = ptr->next) {\n        branch = ptr->data;\n        gboolean res = seaf_commit_manager_traverse_commit_tree (seaf->commit_mgr,\n                                                                 repo->id,\n                                                                 repo->version,\n                                                                 branch->commit_id,\n                                                                 collect_commit,\n                                                                 &commits,\n                                                                 FALSE);\n        if (!res) {\n            for (ptr = commits; ptr != NULL; ptr = ptr->next)\n                
seaf_commit_unref ((SeafCommit *)(ptr->data));\n            g_list_free (commits);\n            goto out;\n        }\n    }\n\n    commits = g_list_reverse (commits);\n\nout:\n    for (ptr = branches; ptr != NULL; ptr = ptr->next) {\n        seaf_branch_unref ((SeafBranch *)ptr->data);\n    }\n    return commits;\n}\n\nSeafRepoManager*\nseaf_repo_manager_new (SeafileSession *seaf)\n{\n    SeafRepoManager *mgr = g_new0 (SeafRepoManager, 1);\n\n    mgr->priv = g_new0 (SeafRepoManagerPriv, 1);\n    mgr->seaf = seaf;\n\n    return mgr;\n}\n\nint\nseaf_repo_manager_init (SeafRepoManager *mgr)\n{\n    return 0;\n}\n\nint\nseaf_repo_manager_start (SeafRepoManager *mgr)\n{\n    return 0;\n}\n\nstatic gboolean\nrepo_exists_in_db (SeafDB *db, const char *id)\n{\n    char sql[256];\n    gboolean db_err = FALSE;\n\n    snprintf (sql, sizeof(sql), \"SELECT repo_id FROM Repo WHERE repo_id = '%s'\",\n              id);\n    return seaf_db_check_for_existence (db, sql, &db_err);\n}\n\nstatic gboolean\nrepo_exists_in_db_ex (SeafDB *db, const char *id, gboolean *db_err)\n{\n    char sql[256];\n\n    snprintf (sql, sizeof(sql), \"SELECT repo_id FROM Repo WHERE repo_id = '%s'\",\n              id);\n    return seaf_db_check_for_existence (db, sql, db_err);\n}\n\nSeafRepo*\nseaf_repo_manager_get_repo (SeafRepoManager *manager, const gchar *id)\n{\n    SeafRepo repo;\n    int len = strlen(id);\n\n    if (len >= 37)\n        return NULL;\n\n    memcpy (repo.id, id, len + 1);\n\n    if (repo_exists_in_db (manager->seaf->db, id)) {\n        SeafRepo *ret = load_repo (manager, id, FALSE);\n        if (!ret)\n            return NULL;\n        /* seaf_repo_ref (ret); */\n        return ret;\n    }\n\n    return NULL;\n}\n\nSeafRepo*\nseaf_repo_manager_get_repo_ex (SeafRepoManager *manager, const gchar *id)\n{\n    int len = strlen(id);\n    gboolean db_err = FALSE, exists;\n    SeafRepo *ret = NULL;\n\n    if (len >= 37)\n        return NULL;\n\n    exists = repo_exists_in_db_ex 
(manager->seaf->db, id, &db_err);\n\n    if (db_err) {\n        ret = seaf_repo_new(id, NULL, NULL);\n        ret->is_corrupted = TRUE;\n        return ret;\n    }\n\n    if (exists) {\n        ret = load_repo (manager, id, TRUE);\n        return ret;\n    }\n\n    return NULL;\n}\n\ngboolean\nseaf_repo_manager_repo_exists (SeafRepoManager *manager, const gchar *id)\n{\n    SeafRepo repo;\n    memcpy (repo.id, id, 37);\n\n    return repo_exists_in_db (manager->seaf->db, id);\n}\n\nstatic void\nload_repo_commit (SeafRepoManager *manager,\n                  SeafRepo *repo,\n                  SeafBranch *branch)\n{\n    SeafCommit *commit;\n\n    commit = seaf_commit_manager_get_commit_compatible (manager->seaf->commit_mgr,\n                                                        repo->id,\n                                                        branch->commit_id);\n    if (!commit) {\n        seaf_warning (\"Commit %s is missing\\n\", branch->commit_id);\n        repo->is_corrupted = TRUE;\n        return;\n    }\n\n    set_head_common (repo, branch);\n    seaf_repo_from_commit (repo, commit);\n\n    seaf_commit_unref (commit);\n}\n\nstatic SeafRepo *\nload_repo (SeafRepoManager *manager, const char *repo_id, gboolean ret_corrupt)\n{\n    SeafRepo *repo;\n    SeafBranch *branch;\n    SeafVirtRepo *vinfo = NULL;\n\n    repo = seaf_repo_new(repo_id, NULL, NULL);\n    if (!repo) {\n        seaf_warning (\"[repo mgr] failed to alloc repo.\\n\");\n        return NULL;\n    }\n\n    repo->manager = manager;\n\n    branch = seaf_branch_manager_get_branch (seaf->branch_mgr, repo_id, \"master\");\n    if (!branch) {\n        seaf_warning (\"Failed to get master branch of repo %.8s.\\n\", repo_id);\n        repo->is_corrupted = TRUE;\n    } else {\n        load_repo_commit (manager, repo, branch);\n        seaf_branch_unref (branch);\n    }\n\n    if (repo->is_corrupted) {\n        if (!ret_corrupt) {\n            seaf_repo_free (repo);\n            return NULL;\n        }\n   
     return repo;\n    }\n\n    vinfo = seaf_repo_manager_get_virtual_repo_info (manager, repo_id);\n    if (vinfo) {\n        repo->is_virtual = TRUE;\n        memcpy (repo->store_id, vinfo->origin_repo_id, 36);\n    } else {\n        repo->is_virtual = FALSE;\n        memcpy (repo->store_id, repo->id, 36);\n    }\n    seaf_virtual_repo_info_free (vinfo);\n\n    return repo;\n}\n\nstatic gboolean\ncollect_repo_id (SeafDBRow *row, void *data)\n{\n    GList **p_ids = data;\n    const char *repo_id;\n\n    repo_id = seaf_db_row_get_column_text (row, 0);\n    *p_ids = g_list_prepend (*p_ids, g_strdup(repo_id));\n\n    return TRUE;\n}\n\nGList *\nseaf_repo_manager_get_repo_id_list (SeafRepoManager *mgr)\n{\n    GList *ret = NULL;\n    char sql[256];\n\n    snprintf (sql, 256, \"SELECT repo_id FROM Repo\");\n\n    if (seaf_db_foreach_selected_row (mgr->seaf->db, sql, \n                                      collect_repo_id, &ret) < 0)\n        return NULL;\n\n    return ret;\n}\n\nGList *\nseaf_repo_manager_get_repo_id_list_by_prefix (SeafRepoManager *mgr,\n                                              const char *prefix)\n{\n    GList *ret = NULL;\n    char sql[256];\n\n    snprintf (sql, 256, \"SELECT repo_id FROM Repo WHERE repo_id LIKE '%s%%'\", prefix);\n\n    if (seaf_db_foreach_selected_row (mgr->seaf->db, sql,\n                                      collect_repo_id, &ret) < 0) {\n        return NULL;\n    }\n\n    return ret;\n}\n\nGList *\nseaf_repo_manager_get_repo_list (SeafRepoManager *mgr,\n                                 int start, int limit,\n                                 gboolean *error)\n{\n    char sql[256];\n    GList *id_list = NULL, *ptr;\n    GList *ret = NULL;\n    SeafRepo *repo;\n\n    *error = FALSE;\n\n    if (start == -1 && limit == -1)\n        snprintf (sql, 256, \"SELECT repo_id FROM Repo\");\n    else\n        snprintf (sql, 256, \"SELECT repo_id FROM Repo LIMIT %d, %d\", start, limit);\n\n    if (seaf_db_foreach_selected_row 
(mgr->seaf->db, sql, \n                                      collect_repo_id, &id_list) < 0)\n        goto error;\n\n    for (ptr = id_list; ptr; ptr = ptr->next) {\n        char *repo_id = ptr->data;\n        repo = seaf_repo_manager_get_repo_ex (mgr, repo_id);\n        if (repo)\n            ret = g_list_prepend (ret, repo);\n    }\n\n    string_list_free (id_list);\n    return ret;\n\nerror:\n    *error = TRUE;\n    string_list_free (id_list);\n    return NULL;\n}\n\nint\nseaf_repo_manager_set_repo_history_limit (SeafRepoManager *mgr,\n                                          const char *repo_id,\n                                          int days)\n{\n    SeafVirtRepo *vinfo;\n    SeafDB *db = mgr->seaf->db;\n    char sql[256];\n\n    vinfo = seaf_repo_manager_get_virtual_repo_info (mgr, repo_id);\n    if (vinfo) {\n        seaf_virtual_repo_info_free (vinfo);\n        return 0;\n    }\n\n    if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL) {\n        gboolean err;\n        snprintf(sql, sizeof(sql),\n                 \"SELECT repo_id FROM RepoHistoryLimit \"\n                 \"WHERE repo_id='%s'\", repo_id);\n        if (seaf_db_check_for_existence(db, sql, &err))\n            snprintf(sql, sizeof(sql),\n                     \"UPDATE RepoHistoryLimit SET days=%d\"\n                     \"WHERE repo_id='%s'\", days, repo_id);\n        else\n            snprintf(sql, sizeof(sql),\n                     \"INSERT INTO RepoHistoryLimit (repo_id, days) VALUES \"\n                     \"('%s', %d)\", repo_id, days);\n        if (err)\n            return -1;\n        return seaf_db_query(db, sql);\n    } else {\n        snprintf (sql, sizeof(sql),\n                  \"REPLACE INTO RepoHistoryLimit (repo_id, days) VALUES ('%s', %d)\",\n                  repo_id, days);\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n    }\n\n    return 0;\n}\n\nstatic gboolean\nget_limit (SeafDBRow *row, void *vdays)\n{\n    int *days = vdays;\n\n    *days = 
seaf_db_row_get_column_int (row, 0);\n\n    return FALSE;\n}\n\nint\nseaf_repo_manager_get_repo_history_limit (SeafRepoManager *mgr,\n                                          const char *repo_id)\n{\n    SeafVirtRepo *vinfo;\n    const char *r_repo_id = repo_id;\n    char sql[256];\n    int per_repo_days = -1;\n    int ret;\n\n    vinfo = seaf_repo_manager_get_virtual_repo_info (mgr, repo_id);\n    if (vinfo)\n        r_repo_id = vinfo->origin_repo_id;\n\n    snprintf (sql, sizeof(sql),\n              \"SELECT days FROM RepoHistoryLimit WHERE repo_id='%s'\",\n              r_repo_id);\n    seaf_virtual_repo_info_free (vinfo);\n\n    /* We don't use seaf_db_get_int() because we need to differ DB error\n     * from not exist.\n     * We can't just return global config value if DB error occured,\n     * since the global value may be smaller than per repo one.\n     * This can lead to data lose in GC.\n     */\n    ret = seaf_db_foreach_selected_row (mgr->seaf->db, sql,\n                                        get_limit, &per_repo_days);\n    if (ret == 0) {\n        /* If per repo value is not set, return the global one. 
*/\n        per_repo_days = seaf_cfg_manager_get_config_int (mgr->seaf->cfg_mgr,\n                                                         \"history\", \"keep_days\");\n    }\n\n    if (per_repo_days < 0) {\n        per_repo_days = -1;\n    }\n\n    return per_repo_days;\n}\n\nint\nseaf_repo_manager_set_repo_valid_since (SeafRepoManager *mgr,\n                                        const char *repo_id,\n                                        gint64 timestamp)\n{\n    SeafDB *db = mgr->seaf->db;\n    char sql[256];\n\n    if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL) {\n        gboolean err;\n        snprintf(sql, sizeof(sql),\n                 \"SELECT repo_id FROM RepoValidSince WHERE \"\n                 \"repo_id='%s'\", repo_id);\n        if (seaf_db_check_for_existence(db, sql, &err))\n            snprintf(sql, sizeof(sql),\n                     \"UPDATE RepoValidSince SET timestamp=%\"G_GINT64_FORMAT\n                     \" WHERE repo_id='%s'\", timestamp, repo_id);\n        else\n            snprintf(sql, sizeof(sql),\n                     \"INSERT INTO RepoValidSince (repo_id, timestamp) VALUES \"\n                     \"('%s', %\"G_GINT64_FORMAT\")\", repo_id, timestamp);\n        if (err)\n            return -1;\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n    } else {\n        snprintf (sql, sizeof(sql),\n                  \"REPLACE INTO RepoValidSince (repo_id, timestamp) VALUES ('%s', %\"G_GINT64_FORMAT\")\",\n                  repo_id, timestamp);\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n    }\n\n    return 0;\n}\n\ngint64\nseaf_repo_manager_get_repo_valid_since (SeafRepoManager *mgr,\n                                        const char *repo_id)\n{\n    char sql[256];\n\n    snprintf (sql, sizeof(sql),\n              \"SELECT timestamp FROM RepoValidSince WHERE repo_id='%s'\",\n              repo_id);\n    /* Also return -1 if DB error. 
*/\n    return seaf_db_get_int64 (mgr->seaf->db, sql);\n}\n\ngint64\nseaf_repo_manager_get_repo_truncate_time (SeafRepoManager *mgr,\n                                          const char *repo_id)\n{\n    int days;\n    gint64 timestamp;\n\n    days = seaf_repo_manager_get_repo_history_limit (mgr, repo_id);\n    timestamp = seaf_repo_manager_get_repo_valid_since (mgr, repo_id);\n\n    gint64 now = (gint64)time(NULL);\n    if (days > 0)\n        return MAX (now - days * 24 * 3600, timestamp);\n    else if (days < 0)\n        return timestamp;\n    else\n        return 0;\n}\n\nstatic gboolean\nload_virtual_info (SeafDBRow *row, void *p_vinfo)\n{\n    SeafVirtRepo *vinfo;\n    const char *origin_repo_id, *path, *base_commit;\n\n    origin_repo_id = seaf_db_row_get_column_text (row, 0);\n    path = seaf_db_row_get_column_text (row, 1);\n    base_commit = seaf_db_row_get_column_text (row, 2);\n\n    vinfo = g_new0 (SeafVirtRepo, 1);\n    memcpy (vinfo->origin_repo_id, origin_repo_id, 36);\n    vinfo->path = g_strdup(path);\n    memcpy (vinfo->base_commit, base_commit, 40);\n\n    *((SeafVirtRepo **)p_vinfo) = vinfo;\n\n    return FALSE;\n}\n\nSeafVirtRepo *\nseaf_repo_manager_get_virtual_repo_info (SeafRepoManager *mgr,\n                                         const char *repo_id)\n{\n    char sql[256];\n    SeafVirtRepo *vinfo = NULL;\n\n    snprintf (sql, 256,\n              \"SELECT origin_repo, path, base_commit FROM VirtualRepo \"\n              \"WHERE repo_id = '%s'\", repo_id);\n    seaf_db_foreach_selected_row (seaf->db, sql, load_virtual_info, &vinfo);\n\n    return vinfo;\n}\n\nvoid\nseaf_virtual_repo_info_free (SeafVirtRepo *vinfo)\n{\n    if (!vinfo) return;\n\n    g_free (vinfo->path);\n    g_free (vinfo);\n}\n\nstatic gboolean\ncollect_virtual_repo_ids (SeafDBRow *row, void *data)\n{\n    GList **p_ids = data;\n    const char *repo_id;\n\n    repo_id = seaf_db_row_get_column_text (row, 0);\n    *p_ids = g_list_prepend (*p_ids, g_strdup(repo_id));\n\n    
return TRUE;\n}\n\nGList *\nseaf_repo_manager_get_virtual_repo_ids_by_origin (SeafRepoManager *mgr,\n                                                  const char *origin_repo)\n{\n    GList *ret = NULL;\n    char sql[256];\n\n    snprintf (sql, 256,\n              \"SELECT repo_id FROM VirtualRepo WHERE origin_repo='%s'\",\n              origin_repo);\n    if (seaf_db_foreach_selected_row (mgr->seaf->db, sql, \n                                      collect_virtual_repo_ids, &ret) < 0) {\n        return NULL;\n    }\n\n    return g_list_reverse (ret);\n}\n\nstatic gboolean\nget_garbage_repo_id (SeafDBRow *row, void *vid_list)\n{\n    GList **ret = vid_list;\n    char *repo_id;\n\n    repo_id = g_strdup(seaf_db_row_get_column_text (row, 0));\n    *ret = g_list_prepend (*ret, repo_id);\n\n    return TRUE;\n}\n\nGList *\nseaf_repo_manager_list_garbage_repos (SeafRepoManager *mgr)\n{\n    GList *repo_ids = NULL;\n\n    seaf_db_foreach_selected_row (seaf->db,\n                                  \"SELECT repo_id FROM GarbageRepos\",\n                                  get_garbage_repo_id, &repo_ids);\n\n    return repo_ids;\n}\n\nvoid\nseaf_repo_manager_remove_garbage_repo (SeafRepoManager *mgr, const char *repo_id)\n{\n    char sql[256];\n\n    snprintf (sql, sizeof(sql), \"DELETE FROM GarbageRepos WHERE repo_id='%s'\",\n              repo_id);\n    seaf_db_query (seaf->db, sql);\n}\n"
  },
  {
    "path": "server/gc/repo-mgr.h",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#ifndef SEAF_REPO_MGR_H\n#define SEAF_REPO_MGR_H\n\n#include <pthread.h>\n\n#include \"seafile-object.h\"\n#include \"commit-mgr.h\"\n#include \"branch-mgr.h\"\n\nstruct _SeafRepoManager;\ntypedef struct _SeafRepo SeafRepo;\n\ntypedef struct SeafVirtRepo {\n    char        origin_repo_id[37];\n    char        *path;\n    char        base_commit[41];\n} SeafVirtRepo;\n\nstruct _SeafRepo {\n    struct _SeafRepoManager *manager;\n\n    gchar       id[37];\n    gchar      *name;\n    gchar      *desc;\n    gchar      *category;       /* not used yet */\n    gboolean    encrypted;\n    int         enc_version;\n    gchar       magic[65];       /* hash(repo_id + passwd), key stretched. */\n    gchar       pwd_hash[65];       /* hash(repo_id + passwd), key stretched. */\n    gchar       *pwd_hash_algo;\n    gchar       *pwd_hash_params;\n    gchar       random_key[97];\n    gchar       salt[65];\n    gboolean    no_local_history;\n\n    SeafBranch *head;\n\n    gboolean    is_corrupted;\n    gboolean    repaired;\n    gboolean    delete_pending;\n    int         ref_cnt;\n\n    int version;\n    /* Used to access fs and block sotre.\n     * This id is different from repo_id when this repo is virtual.\n     * Virtual repos share fs and block store with its origin repo.\n     * However, commit store for each repo is always independent.\n     * So always use repo_id to access commit store.\n     */\n    gchar       store_id[37];\n    gboolean    is_virtual;\n};\n\ngboolean is_repo_id_valid (const char *id);\n\nSeafRepo* \nseaf_repo_new (const char *id, const char *name, const char *desc);\n\nvoid\nseaf_repo_free (SeafRepo *repo);\n\nvoid\nseaf_repo_ref (SeafRepo *repo);\n\nvoid\nseaf_repo_unref (SeafRepo *repo);\n\nvoid\nseaf_repo_to_commit (SeafRepo *repo, SeafCommit *commit);\n\nvoid\nseaf_repo_from_commit (SeafRepo *repo, SeafCommit 
*commit);\n\nvoid\nseaf_virtual_repo_info_free (SeafVirtRepo *vinfo);\n\ntypedef struct _SeafRepoManager SeafRepoManager;\ntypedef struct _SeafRepoManagerPriv SeafRepoManagerPriv;\n\nstruct _SeafRepoManager {\n    struct _SeafileSession *seaf;\n\n    SeafRepoManagerPriv *priv;\n};\n\nSeafRepoManager* \nseaf_repo_manager_new (struct _SeafileSession *seaf);\n\nint\nseaf_repo_manager_init (SeafRepoManager *mgr);\n\nint\nseaf_repo_manager_start (SeafRepoManager *mgr);\n\nint\nseaf_repo_manager_add_repo (SeafRepoManager *mgr, SeafRepo *repo);\n\nint\nseaf_repo_manager_del_repo (SeafRepoManager *mgr, SeafRepo *repo);\n\nSeafRepo* \nseaf_repo_manager_get_repo (SeafRepoManager *manager, const gchar *id);\n\nSeafRepo* \nseaf_repo_manager_get_repo_ex (SeafRepoManager *manager, const gchar *id);\n\ngboolean\nseaf_repo_manager_repo_exists (SeafRepoManager *manager, const gchar *id);\n\nGList* \nseaf_repo_manager_get_repo_list (SeafRepoManager *mgr,\n                                 int start, int limit,\n                                 gboolean *error);\n\nGList *\nseaf_repo_manager_get_repo_id_list (SeafRepoManager *mgr);\n\nGList *\nseaf_repo_manager_get_repo_id_list_by_prefix (SeafRepoManager *mgr,\n                                              const char *prefix);\n\nint\nseaf_repo_manager_set_repo_history_limit (SeafRepoManager *mgr,\n                                          const char *repo_id,\n                                          int days);\n\nint\nseaf_repo_manager_get_repo_history_limit (SeafRepoManager *mgr,\n                                          const char *repo_id);\n\nint\nseaf_repo_manager_set_repo_valid_since (SeafRepoManager *mgr,\n                                        const char *repo_id,\n                                        gint64 timestamp);\n\ngint64\nseaf_repo_manager_get_repo_valid_since (SeafRepoManager *mgr,\n                                        const char *repo_id);\n\n/*\n * Return the timestamp to stop traversing history.\n * 
Returns > 0 if traverse a period of history;\n * Returns = 0 if only traverse the head commit;\n * Returns < 0 if traverse full history.\n */\ngint64\nseaf_repo_manager_get_repo_truncate_time (SeafRepoManager *mgr,\n                                          const char *repo_id);\n\nSeafVirtRepo *\nseaf_repo_manager_get_virtual_repo_info (SeafRepoManager *mgr,\n                                         const char *repo_id);\n\nvoid\nseaf_virtual_repo_info_free (SeafVirtRepo *vinfo);\n\nGList *\nseaf_repo_manager_get_virtual_repo_ids_by_origin (SeafRepoManager *mgr,\n                                                  const char *origin_repo);\n\nGList *\nseaf_repo_manager_list_garbage_repos (SeafRepoManager *mgr);\n\nvoid\nseaf_repo_manager_remove_garbage_repo (SeafRepoManager *mgr, const char *repo_id);\n\n#endif\n"
  },
  {
    "path": "server/gc/seaf-fsck.c",
    "content": "#include \"common.h\"\n#include \"log.h\"\n\n#include <getopt.h>\n\n#include \"seafile-session.h\"\n#include \"fsck.h\"\n\n#include \"utils.h\"\n\nstatic char *ccnet_dir = NULL;\nstatic char *seafile_dir = NULL;\nstatic char *central_config_dir = NULL;\n\nSeafileSession *seaf;\n\nstatic const char *short_opts = \"hvft:c:d:rE:F:sS\";\nstatic const struct option long_opts[] = {\n    { \"help\", no_argument, NULL, 'h', },\n    { \"version\", no_argument, NULL, 'v', },\n    { \"force\", no_argument, NULL, 'f', },\n    { \"repair\", no_argument, NULL, 'r', },\n    { \"threads\", required_argument, NULL, 't', },\n    { \"export\", required_argument, NULL, 'E', },\n    { \"config-file\", required_argument, NULL, 'c', },\n    { \"central-config-dir\", required_argument, NULL, 'F' },\n    { \"seafdir\", required_argument, NULL, 'd', },\n    { \"shallow\", no_argument, NULL, 's', },\n    { \"check-file-size\", no_argument, NULL, 'S' },\n    { 0, 0, 0, 0, },\n};\n\nstatic void usage ()\n{\n    fprintf (stderr, \"usage: seaf-fsck [-r] [-E exported_path] [-c config_dir] [-d seafile_dir] \"\n                     \"[repo_id_1 [repo_id_2 ...]]\\n\");\n}\n\n#ifdef WIN32\n/* Get the commandline arguments in unicode, then convert them to utf8  */\nstatic char **\nget_argv_utf8 (int *argc)\n{\n    int i = 0;\n    char **argv = NULL;\n    const wchar_t *cmdline = NULL;\n    wchar_t **argv_w = NULL;\n\n    cmdline = GetCommandLineW();\n    argv_w = CommandLineToArgvW (cmdline, argc);\n    if (!argv_w) {\n        printf(\"failed to CommandLineToArgvW(), GLE=%lu\\n\", GetLastError());\n        return NULL;\n    }\n\n    argv = (char **)malloc (sizeof(char*) * (*argc));\n    for (i = 0; i < *argc; i++) {\n        argv[i] = wchar_to_utf8 (argv_w[i]);\n    }\n\n    return argv;\n}\n#endif\n\n#ifdef __linux__\n\n/* Compare the owner uid of the seafile-data dir with the current uid. 
*/\nstatic gboolean\ncheck_user (const char *seafile_dir, uid_t *current_user, uid_t *seafile_user)\n{\n    struct stat st;\n    uid_t euid;\n\n    if (stat (seafile_dir, &st) < 0) {\n        seaf_warning (\"Failed to stat seafile data dir %s: %s\\n\",\n                      seafile_dir, strerror(errno));\n        return FALSE;\n    }\n\n    euid = geteuid();\n\n    *current_user = euid;\n    *seafile_user = st.st_uid;\n\n    return (euid == st.st_uid);\n}\n\n#endif  /* __linux__ */\n\nint\nmain(int argc, char *argv[])\n{\n    int c;\n    gboolean repair = FALSE;\n    gboolean force = FALSE;\n    gboolean check_integrity = TRUE;\n    gboolean check_file_size = FALSE;\n    char *export_path = NULL;\n    int max_thread_num = 0;\n\n#ifdef WIN32\n    argv = get_argv_utf8 (&argc);\n#endif\n\n    ccnet_dir = DEFAULT_CONFIG_DIR;\n\n    while ((c = getopt_long(argc, argv,\n                short_opts, long_opts, NULL)) != EOF) {\n        switch (c) {\n        case 'h':\n            usage();\n            exit(0);\n        case 'v':\n            exit(-1);\n            break;\n        case 'f':\n            force = TRUE;\n            break;\n        case 't':\n            max_thread_num = atoi(strdup(optarg));\n            break;\n        case 'r':\n            repair = TRUE;\n            break;\n        case 'E':\n            export_path = strdup(optarg);\n            break;\n        case 'c':\n            ccnet_dir = strdup(optarg);\n            break;\n        case 'd':\n            seafile_dir = strdup(optarg);\n            break;\n        case 'F':\n            central_config_dir = strdup(optarg);\n            break;\n        case 'S':\n            check_file_size = TRUE;\n            break;\n        case 's':\n            check_integrity = FALSE;\n            break;\n        default:\n            usage();\n            exit(-1);\n        }\n    }\n\n#if !GLIB_CHECK_VERSION(2, 35, 0)\n    g_type_init();\n#endif\n\n    if (seafile_log_init (\"-\", \"info\", \"debug\", 
\"seaf-fsck\") < 0) {\n        fprintf (stderr, \"Failed to init log.\\n\");\n        exit (1);\n    }\n\n    if (seafile_dir == NULL)\n        seafile_dir = g_build_filename (ccnet_dir, \"seafile-data\", NULL);\n\n#ifdef __linux__\n    uid_t current_user, seafile_user;\n    if (!export_path && !force && !check_user (seafile_dir, &current_user, &seafile_user)) {\n        seaf_message (\"Current user (%u) is not the user for running \"\n                      \"seafile server (%u). Unable to run fsck.\\n\",\n                      current_user, seafile_user);\n        exit(1);\n    }\n#endif\n\n    seaf = seafile_session_new(central_config_dir, seafile_dir, ccnet_dir,\n                               export_path == NULL);\n    if (!seaf) {\n        seaf_warning (\"Failed to create seafile session.\\n\");\n        exit (1);\n    }\n\n    GList *repo_id_list = NULL;\n    int i;\n    for (i = optind; i < argc; i++)\n        repo_id_list = g_list_append (repo_id_list, g_strdup(argv[i]));\n\n    if (export_path) {\n        export_file (repo_id_list, seafile_dir, export_path);\n    } else {\n        FsckOptions options;\n        memset (&options, 0, sizeof(FsckOptions));\n        options.max_thread_num = max_thread_num;\n        options.check_integrity = check_integrity;\n        options.check_file_size = check_file_size;\n        options.repair = repair;\n        seaf_fsck (repo_id_list, &options);\n    }\n\n    return 0;\n}\n"
  },
  {
    "path": "server/gc/seafile-session.c",
    "content": "#include \"common.h\"\n\n#include <sys/types.h>\n#include <sys/stat.h>\n#include <unistd.h>\n\n#include <utils.h>\n\n#include \"seafile-session.h\"\n#include \"seaf-utils.h\"\n\n#include \"log.h\"\n\nSeafileSession *\nseafile_session_new(const char *central_config_dir,\n                    const char *seafile_dir,\n                    const char *ccnet_dir,\n                    gboolean need_db)\n{\n    char *abs_central_config_dir = NULL;\n    char *abs_seafile_dir;\n    char *abs_ccnet_dir;\n    char *tmp_file_dir;\n    char *config_file_path;\n    struct stat st;\n    GKeyFile *config;\n    SeafileSession *session = NULL;\n\n    abs_seafile_dir = ccnet_expand_path (seafile_dir);\n    abs_ccnet_dir = ccnet_expand_path (ccnet_dir);\n    tmp_file_dir = g_build_filename (abs_seafile_dir, \"tmpfiles\", NULL);\n    if (central_config_dir) {\n        abs_central_config_dir = ccnet_expand_path (central_config_dir);\n    }\n    const char *confdir = abs_central_config_dir ? abs_central_config_dir : abs_seafile_dir;\n    config_file_path = g_build_filename(confdir, \"seafile.conf\", NULL);\n\n    if (g_stat(confdir, &st) < 0 || !S_ISDIR(st.st_mode)) {\n        seaf_warning (\"Config dir dir %s does not exist\\n\",\n                   abs_seafile_dir);\n        goto onerror;\n    }\n\n    if (g_stat(abs_seafile_dir, &st) < 0 || !S_ISDIR(st.st_mode)) {\n        seaf_warning (\"Seafile data dir %s does not exist\\n\",\n                   abs_seafile_dir);\n        goto onerror;\n    }\n\n    if (g_stat(tmp_file_dir, &st) < 0 || !S_ISDIR(st.st_mode)) {\n        seaf_warning (\"Seafile tmp dir %s does not exist\\n\",\n                   tmp_file_dir);\n        goto onerror;\n    }\n\n    GError *error = NULL;\n    config = g_key_file_new ();\n    if (!g_key_file_load_from_file (config, config_file_path, \n                                    G_KEY_FILE_NONE, &error)) {\n        seaf_warning (\"Failed to load config file.\\n\");\n        g_key_file_free 
(config);\n        goto onerror;\n    }\n\n    session = g_new0(SeafileSession, 1);\n    session->seaf_dir = abs_seafile_dir;\n    session->ccnet_dir = abs_ccnet_dir;\n    session->tmp_file_dir = tmp_file_dir;\n    session->config = config;\n\n    if (need_db) {\n        if (load_database_config (session) < 0) {\n            seaf_warning (\"Failed to load database config.\\n\");\n            goto onerror;\n        }\n    }\n\n    session->cfg_mgr = seaf_cfg_manager_new (session);\n    if (!session->cfg_mgr)\n        goto onerror;\n\n    session->fs_mgr = seaf_fs_manager_new (session, abs_seafile_dir);\n    if (!session->fs_mgr)\n        goto onerror;\n    session->block_mgr = seaf_block_manager_new (session, abs_seafile_dir);\n    if (!session->block_mgr)\n        goto onerror;\n    session->commit_mgr = seaf_commit_manager_new (session);\n    if (!session->commit_mgr)\n        goto onerror;\n    session->repo_mgr = seaf_repo_manager_new (session);\n    if (!session->repo_mgr)\n        goto onerror;\n    session->branch_mgr = seaf_branch_manager_new (session);\n    if (!session->branch_mgr)\n        goto onerror;\n\n    return session;\n\nonerror:\n    free (abs_seafile_dir);\n    g_free (tmp_file_dir);\n    g_free (config_file_path);\n    g_free (session);\n    return NULL;    \n}\n"
  },
  {
    "path": "server/gc/seafile-session.h",
    "content": "#ifndef SEAFILE_SESSION_H\n#define SEAFILE_SESSION_H\n\n#include <stdint.h>\n#include <glib.h>\n\n#include \"block-mgr.h\"\n#include \"fs-mgr.h\"\n#include \"commit-mgr.h\"\n#include \"branch-mgr.h\"\n#include \"repo-mgr.h\"\n#include \"db.h\"\n#include \"seaf-db.h\"\n#include \"config-mgr.h\"\n\ntypedef struct _SeafileSession SeafileSession;\n\nstruct _SeafileSession {\n    char                *seaf_dir;\n    char                *ccnet_dir;\n    char                *tmp_file_dir;\n    /* Config that's only loaded on start */\n    GKeyFile            *config;\n    SeafDB              *db;\n    SeafDB              *ccnet_db;\n    char                *seahub_pk;\n\n    SeafBlockManager    *block_mgr;\n    SeafFSManager       *fs_mgr;\n    SeafCommitManager   *commit_mgr;\n    SeafBranchManager   *branch_mgr;\n    SeafRepoManager     *repo_mgr;\n    SeafCfgManager      *cfg_mgr;\n\n    gboolean create_tables;\n    gboolean ccnet_create_tables;\n};\n\nextern SeafileSession *seaf;\n\nSeafileSession *\nseafile_session_new(const char *central_config_dir,\n                    const char *seafile_dir,\n                    const char *ccnet_dir,\n                    gboolean need_db);\n\n#endif\n"
  },
  {
    "path": "server/gc/seafserv-gc.c",
    "content": "#include \"common.h\"\n#include \"log.h\"\n\n#include <getopt.h>\n\n#include \"seafile-session.h\"\n#include \"seaf-utils.h\"\n#include \"gc-core.h\"\n#include \"verify.h\"\n\n#include \"utils.h\"\n\nstatic char *ccnet_dir = NULL;\nstatic char *seafile_dir = NULL;\nstatic char *central_config_dir = NULL;\n\nSeafileSession *seaf;\n\nstatic const char *short_opts = \"hvc:d:VDrRF:Ct:i:\";\nstatic const struct option long_opts[] = {\n    { \"help\", no_argument, NULL, 'h', },\n    { \"version\", no_argument, NULL, 'v', },\n    { \"config-file\", required_argument, NULL, 'c', },\n    { \"central-config-dir\", required_argument, NULL, 'F' },\n    { \"seafdir\", required_argument, NULL, 'd', },\n    { \"verbose\", no_argument, NULL, 'V' },\n    { \"dry-run\", no_argument, NULL, 'D' },\n    { \"rm-deleted\", no_argument, NULL, 'r' },\n    { \"rm-fs\", no_argument, NULL, 'R' },\n    { \"check\", no_argument, NULL, 'C' },\n    { \"thread-num\", required_argument, NULL, 't', },\n    { \"id-prefix\", required_argument, NULL, 'i', },\n    { 0, 0, 0, 0 },\n};\n\nstatic void usage ()\n{\n    fprintf (stderr,\n             \"usage: seafserv-gc [-c config_dir] [-d seafile_dir] \"\n             \"[repo_id_1 [repo_id_2 ...]]\\n\"\n             \"Additional options:\\n\"\n             \"-r, --rm-deleted: remove garbaged repos\\n\"\n             \"-R, --rm-fs: remove fs object\\n\"\n             \"-D, --dry-run: report blocks that can be remove, but not remove them\\n\"\n             \"-V, --verbose: verbose output messages\\n\"\n             \"-C, --check: check data integrity\\n\"\n             \"-t, --thread-num: thread number for gc repos\\n\");\n}\n\n#ifdef WIN32\n/* Get the commandline arguments in unicode, then convert them to utf8  */\nstatic char **\nget_argv_utf8 (int *argc)\n{\n    int i = 0;\n    char **argv = NULL;\n    const wchar_t *cmdline = NULL;\n    wchar_t **argv_w = NULL;\n\n    cmdline = GetCommandLineW();\n    argv_w = CommandLineToArgvW (cmdline, 
argc);\n    if (!argv_w) {\n        printf(\"failed to CommandLineToArgvW(), GLE=%lu\\n\", GetLastError());\n        return NULL;\n    }\n\n    argv = (char **)malloc (sizeof(char*) * (*argc));\n    for (i = 0; i < *argc; i++) {\n        argv[i] = wchar_to_utf8 (argv_w[i]);\n    }\n\n    return argv;\n}\n#endif\n\n#define DEFAULT_THREAD_NUM 10\n\nint\nmain(int argc, char *argv[])\n{\n    int c;\n    int verbose = 0;\n    int dry_run = 0;\n    int rm_garbage = 0;\n    int rm_fs = 0;\n    int check_integrity = 0;\n    int thread_num = 1;\n    const char *debug_str = NULL;\n    char *id_prefix = NULL;\n\n#ifdef WIN32\n    argv = get_argv_utf8 (&argc);\n#endif\n\n    ccnet_dir = DEFAULT_CONFIG_DIR;\n\n    while ((c = getopt_long(argc, argv,\n                short_opts, long_opts, NULL)) != EOF) {\n        switch (c) {\n        case 'h':\n            usage();\n            exit(0);\n        case 'v':\n            exit(-1);\n            break;\n        case 'c':\n            ccnet_dir = strdup(optarg);\n            break;\n        case 'd':\n            seafile_dir = strdup(optarg);\n            break;\n        case 'F':\n            central_config_dir = strdup(optarg);\n            break;\n        case 'V':\n            verbose = 1;\n            break;\n        case 'D':\n            dry_run = 1;\n            break;\n        case 'r':\n            rm_garbage = 1;\n            break;\n        case 'R':\n            rm_fs = 1;\n            break;\n        case 'C':\n            check_integrity = 1;\n            break;\n        case 't':\n            thread_num = atoi(optarg);\n            break;\n        case 'i':\n            id_prefix = g_strdup(optarg);\n            break;\n        default:\n            usage();\n            exit(-1);\n        }\n    }\n\n#if !GLIB_CHECK_VERSION(2, 35, 0)\n    g_type_init();\n#endif\n\n    if (!debug_str)\n        debug_str = g_getenv(\"SEAFILE_DEBUG\");\n    seafile_debug_set_flags_string (debug_str);\n\n    if (seafile_log_init 
(\"-\", \"info\", \"debug\", \"seafserv-gc\") < 0) {\n        fprintf (stderr, \"Failed to init log.\\n\");\n        exit (1);\n    }\n\n    if (seafile_dir == NULL)\n        seafile_dir = g_build_filename (ccnet_dir, \"seafile-data\", NULL);\n    \n    seaf = seafile_session_new(central_config_dir, seafile_dir, ccnet_dir, TRUE);\n    if (!seaf) {\n        seaf_warning (\"Failed to create seafile session.\\n\");\n        exit (1);\n    }\n\n    if (rm_garbage) {\n        delete_garbaged_repos (dry_run, thread_num);\n        return 0;\n    }\n\n    GList *repo_id_list = NULL;\n    int i;\n    for (i = optind; i < argc; i++)\n        repo_id_list = g_list_append (repo_id_list, g_strdup(argv[i]));\n\n    if (check_integrity) {\n        return verify_repos (repo_id_list);\n    }\n\n    gc_core_run (repo_id_list, id_prefix, dry_run, verbose, thread_num, rm_fs);\n\n    g_free (id_prefix);\n\n    return 0;\n}\n"
  },
  {
    "path": "server/gc/verify.c",
    "content": "#include \"seafile-session.h\"\n#include \"utils.h\"\n#include \"log.h\"\n\ntypedef struct VerifyData {\n    SeafRepo *repo;\n    gint64 truncate_time;\n    gboolean traversed_head;\n    GHashTable *exist_blocks;\n    gboolean traverse_base_commit;\n    GHashTable *visited;\n    GHashTable *visited_commits;\n} VerifyData;\n\nstatic int\ncheck_blocks (VerifyData *data, const char *file_id)\n{\n    SeafRepo *repo = data->repo;\n    Seafile *seafile;\n    int i;\n\n    seafile = seaf_fs_manager_get_seafile (seaf->fs_mgr,\n                                           repo->store_id,\n                                           repo->version,\n                                           file_id);\n    if (!seafile) {\n        seaf_warning (\"Failed to find file %s.\\n\", file_id);\n        return -1;\n    }\n\n    for (i = 0; i < seafile->n_blocks; ++i) {\n        if (!g_hash_table_lookup(data->exist_blocks, seafile->blk_sha1s[i])) {\n            seaf_message (\"Block %s is missing.\\n\", seafile->blk_sha1s[i]);\n        }\n    }\n\n    seafile_unref (seafile);\n\n    return 0;\n}\n\nstatic gboolean\nfs_callback (SeafFSManager *mgr,\n             const char *store_id,\n             int version,\n             const char *obj_id,\n             int type,\n             void *user_data,\n             gboolean *stop)\n{\n    VerifyData *data = user_data;\n\n    if (data->visited != NULL) {\n        if (g_hash_table_lookup (data->visited, obj_id) != NULL) {\n            *stop = TRUE;\n            return TRUE;\n        }\n\n        char *key = g_strdup(obj_id);\n        g_hash_table_replace (data->visited, key, key);\n    }\n\n    if (data->traverse_base_commit) {\n        return TRUE;\n    }\n\n    if (type == SEAF_METADATA_TYPE_FILE && check_blocks (data, obj_id) < 0)\n        return FALSE;\n\n    return TRUE;\n}\n\nstatic gboolean\ntraverse_commit (SeafCommit *commit, void *vdata, gboolean *stop)\n{\n    VerifyData *data = vdata;\n    SeafRepo *repo = 
data->repo;\n    int ret;\n\n    if (data->visited_commits != NULL) {\n        if (g_hash_table_lookup (data->visited_commits, commit->commit_id)) {\n            // Has traversed on prev head commit, stop traverse from this branch\n            *stop = TRUE;\n            return TRUE;\n        }\n    }\n\n    if (data->truncate_time == 0)\n    {\n        *stop = TRUE;\n        /* Stop after traversing the head commit. */\n    }\n    else if (data->truncate_time > 0 &&\n             (gint64)(commit->ctime) < data->truncate_time &&\n             data->traversed_head)\n    {\n        /* Still traverse the first commit older than truncate_time.\n         * If a file in the child commit of this commit is deleted,\n         * we need to access this commit in order to restore it\n         * from trash.\n         */\n        *stop = TRUE;\n    }\n\n    if (!data->traversed_head)\n        data->traversed_head = TRUE;\n\n    ret = seaf_fs_manager_traverse_tree (seaf->fs_mgr,\n                                         repo->store_id,\n                                         repo->version,\n                                         commit->root_id,\n                                         fs_callback,\n                                         vdata, FALSE);\n    if (ret < 0)\n        return FALSE;\n\n    int dummy;\n    g_hash_table_replace (data->visited_commits,\n                          g_strdup (commit->commit_id), &dummy);\n\n    return TRUE;\n}\n\nstatic int\nverify_virtual_repos (VerifyData *data)\n{\n    SeafRepo *repo = data->repo;\n    if (repo->is_virtual) {\n        return 0;\n    }\n\n    data->traverse_base_commit = TRUE;\n\n    GList *vrepo_ids = NULL, *ptr;\n    char *repo_id;\n    SeafVirtRepo *vinfo;\n    int ret = 0;\n\n    vrepo_ids = seaf_repo_manager_get_virtual_repo_ids_by_origin (seaf->repo_mgr,\n                                                                  repo->id);\n\n    for (ptr = vrepo_ids; ptr; ptr = ptr->next) {\n        repo_id = 
ptr->data;\n        vinfo = seaf_repo_manager_get_virtual_repo_info (seaf->repo_mgr, repo_id);\n        if (!vinfo) {\n            continue;\n        }\n\n        gboolean res = seaf_commit_manager_traverse_commit_tree (seaf->commit_mgr,\n                                                                 repo->store_id, repo->version,\n                                                                 vinfo->base_commit,\n                                                                 traverse_commit,\n                                                                 data,\n                                                                 FALSE);\n        seaf_virtual_repo_info_free (vinfo);\n        if (!res) {\n            seaf_warning (\"Failed to traverse base commit %s for virtual repo %s.\\n\", vinfo->base_commit, repo_id);\n            ret = -1;\n            goto out;\n        }\n    }\n    data->traverse_base_commit = FALSE;\n\nout:\n    string_list_free (vrepo_ids);\n    return ret;\n\n}\n\nstatic gboolean\ncollect_exist_blocks (const char *store_id, int version,\n                      const char *block_id, void *vdata)\n{\n    GHashTable *exist_blocks = vdata;\n    char *copy = g_strdup (block_id);\n\n    g_hash_table_replace (exist_blocks, copy, copy);\n\n    return TRUE;\n}\n\nstatic int\nverify_repo (SeafRepo *repo)\n{\n    GList *branches, *ptr;\n    SeafBranch *branch;\n    int ret = 0;\n    VerifyData data = {0};\n    data.visited = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL);\n    data.visited_commits = g_hash_table_new_full (g_str_hash, g_str_equal,\n                                                   g_free, NULL);\n\n    data.repo = repo;\n    data.truncate_time = seaf_repo_manager_get_repo_truncate_time (repo->manager,\n                                                                   repo->id);\n    data.exist_blocks = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL);\n    ret = seaf_block_manager_foreach_block 
(seaf->block_mgr,\n                                            repo->store_id, repo->version,\n                                            collect_exist_blocks,\n                                            data.exist_blocks);\n    if (ret < 0) {\n        seaf_warning (\"Failed to collect existing blocks for repo %.8s, stop GC.\\n\\n\",\n                      repo->id);\n        g_hash_table_destroy (data.exist_blocks);\n        return ret;\n    }\n\n    branches = seaf_branch_manager_get_branch_list (seaf->branch_mgr, repo->id);\n    if (branches == NULL) {\n        seaf_warning (\"[GC] Failed to get branch list of repo %s.\\n\", repo->id);\n        g_hash_table_destroy (data.exist_blocks);\n        return -1;\n    }\n\n    for (ptr = branches; ptr != NULL; ptr = ptr->next) {\n        branch = ptr->data;\n        gboolean res = seaf_commit_manager_traverse_commit_tree (seaf->commit_mgr,\n                                                                 repo->id,\n                                                                 repo->version,\n                                                                 branch->commit_id,\n                                                                 traverse_commit,\n                                                                 &data, FALSE);\n        seaf_branch_unref (branch);\n        if (!res) {\n            ret = -1;\n            break;\n        }\n    }\n\n    g_list_free (branches);\n\n    if (ret < 0) {\n        g_hash_table_destroy (data.visited);\n        g_hash_table_destroy (data.visited_commits);\n        g_hash_table_destroy (data.exist_blocks);\n        return ret;\n    }\n\n    ret = verify_virtual_repos (&data);\n\n    g_hash_table_destroy (data.visited);\n    g_hash_table_destroy (data.visited_commits);\n    g_hash_table_destroy (data.exist_blocks);\n    return ret;\n}\n\nint\nverify_repos (GList *repo_id_list)\n{\n    if (repo_id_list == NULL)\n        repo_id_list = seaf_repo_manager_get_repo_id_list 
(seaf->repo_mgr);\n\n    GList *ptr;\n    SeafRepo *repo;\n    int ret = 0;\n\n    for (ptr = repo_id_list; ptr != NULL; ptr = ptr->next) {\n        repo = seaf_repo_manager_get_repo_ex (seaf->repo_mgr, (const gchar *)ptr->data);\n\n        g_free (ptr->data);\n\n        if (!repo)\n            continue;\n\n        seaf_message (\"Start to verify repo %s\\n\", repo->id);\n        if (repo->is_corrupted) {\n           seaf_warning (\"Repo %s is corrupted.\\n\", repo->id);\n        } else {\n            ret = verify_repo (repo);\n            if (ret < 0) {\n                seaf_warning (\"Failed to verify repo %s\\n\", repo->id);\n                seaf_repo_unref (repo);\n                continue;\n            }\n            seaf_message (\"Verify repo %s success\\n\", repo->id);\n            seaf_repo_unref (repo);\n        }\n    }\n\n    g_list_free (repo_id_list);\n\n    return ret;\n}\n"
  },
  {
    "path": "server/gc/verify.h",
    "content": "#ifndef GC_VERIFY_H\n#define GC_VERIFY_H\n\nint verify_repos (GList *repo_id_list);\n\n#endif\n"
  },
  {
    "path": "server/http-server.c",
    "content": "#include \"common.h\"\n\n#ifdef HAVE_EVHTP\n#include <pthread.h>\n#include <string.h>\n#include <jansson.h>\n#include <locale.h>\n#include <sys/types.h>\n\n#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)\n#include <event2/event.h>\n#else\n#include <event.h>\n#endif\n\n#include <evhtp.h>\n\n#include <jwt.h>\n\n#include \"mq-mgr.h\"\n#include \"utils.h\"\n#include \"log.h\"\n#include \"http-server.h\"\n#include \"seafile-session.h\"\n#include \"diff-simple.h\"\n#include \"merge-new.h\"\n#include \"seaf-db.h\"\n#include \"seaf-utils.h\"\n\n#include \"access-file.h\"\n#include \"upload-file.h\"\n#include \"fileserver-config.h\"\n\n#include \"http-status-codes.h\"\n\n#define DEFAULT_BIND_HOST \"0.0.0.0\"\n#define DEFAULT_BIND_PORT 8082\n#define DEFAULT_WORKER_THREADS 10\n#define DEFAULT_MAX_DOWNLOAD_DIR_SIZE 100 * ((gint64)1 << 20) /* 100MB */\n#define DEFAULT_MAX_INDEXING_THREADS 1\n#define DEFAULT_MAX_INDEX_PROCESSING_THREADS 3\n#define DEFAULT_FIXED_BLOCK_SIZE ((gint64)1 << 23) /* 8MB */\n#define DEFAULT_CLUSTER_SHARED_TEMP_FILE_MODE 0600\n\n#define HOST \"host\"\n#define PORT \"port\"\n\n#define HTTP_TEMP_FILE_SCAN_INTERVAL  3600 /*1h*/\n#define HTTP_TEMP_FILE_DEFAULT_TTL 3600 * 24 * 3 /*3days*/\n#define HTTP_TEMP_FILE_TTL \"http_temp_file_ttl\"\n#define HTTP_SCAN_INTERVAL \"http_temp_scan_interval\"\n\n#define INIT_INFO \"If you see this page, Seafile HTTP syncing component works.\"\n#define PROTO_VERSION \"{\\\"version\\\": 2}\"\n\n#define CLEANING_INTERVAL_SEC 300\t/* 5 minutes */\n#define TOKEN_EXPIRE_TIME 7200\t    /* 2 hours */\n#define PERM_EXPIRE_TIME 7200       /* 2 hours */\n#define VIRINFO_EXPIRE_TIME 7200       /* 2 hours */\n\n#define FS_ID_LIST_MAX_WORKERS 3\n#define FS_ID_LIST_TOKEN_LEN 36\n\nstruct _HttpServer {\n    evbase_t *evbase;\n    evhtp_t *evhtp;\n    event_t *reap_timer;\n    pthread_t thread_id;\n\n    GHashTable *token_cache;\n    pthread_mutex_t token_cache_lock; /* token -> username */\n\n    
GHashTable *perm_cache;\n    pthread_mutex_t perm_cache_lock; /* repo_id:username -> permission */\n\n    GHashTable *vir_repo_info_cache;\n    pthread_mutex_t vir_repo_info_cache_lock;\n\n    GThreadPool *compute_fs_obj_id_pool;\n\n    GHashTable *fs_obj_ids;\n    pthread_mutex_t fs_obj_ids_lock;\n};\ntypedef struct _HttpServer HttpServer;\n\nstruct _StatsEventData {\n    char *etype;\n    char *user;\n    char *operation;\n    char repo_id[37];\n    guint64 bytes;\n};\ntypedef struct _StatsEventData StatsEventData;\n\ntypedef struct TokenInfo {\n    char *repo_id;\n    char *email;\n    gint64 expire_time;\n} TokenInfo;\n\n// PermInfo caches the results from the last permission check for accessing a repo.\n// They're cached in a hash table having \"repo_Id:username:op\" as key.\n// The cached result is updated on the next call to get_check_permission_cb function, or when the cache expires.\n// The result is only cached if the permission check passed.\ntypedef struct PermInfo {\n    gint64 expire_time;\n} PermInfo;\n\ntypedef struct VirRepoInfo {\n    char *store_id;\n    gint64 expire_time;\n} VirRepoInfo;\n\ntypedef struct FsHdr {\n    char obj_id[40];\n    guint32 obj_size;\n} __attribute__((__packed__)) FsHdr;\n\ntypedef enum CheckExistType {\n    CHECK_FS_EXIST,\n    CHECK_BLOCK_EXIST\n} CheckExistType;\n\nconst char *GET_PROTO_PATH = \"/protocol-version\";\nconst char *OP_PERM_CHECK_REGEX = \"^/repo/[\\\\da-z]{8}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{12}/permission-check/.*\";\nconst char *GET_CHECK_QUOTA_REGEX = \"^/repo/[\\\\da-z]{8}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{12}/quota-check/.*\";\nconst char *HEAD_COMMIT_OPER_REGEX = \"^/repo/[\\\\da-z]{8}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{12}/commit/HEAD\";\nconst char *GET_HEAD_COMMITS_MULTI_REGEX = \"^/repo/head-commits-multi\";\nconst char *COMMIT_OPER_REGEX = 
\"^/repo/[\\\\da-z]{8}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{12}/commit/[\\\\da-z]{40}\";\nconst char *PUT_COMMIT_INFO_REGEX = \"^/repo/[\\\\da-z]{8}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{12}/commit/[\\\\da-z]{40}\";\nconst char *GET_FS_OBJ_ID_REGEX = \"^/repo/[\\\\da-z]{8}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{12}/fs-id-list/.*\";\nconst char *START_FS_OBJ_ID_REGEX = \"^/repo/[\\\\da-z]{8}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{12}/start-fs-id-list/.*\";\nconst char *QUERY_FS_OBJ_ID_REGEX = \"^/repo/[\\\\da-z]{8}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{12}/query-fs-id-list/.*\";\nconst char *RETRIEVE_FS_OBJ_ID_REGEX = \"^/repo/[\\\\da-z]{8}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{12}/retrieve-fs-id-list/.*\";\nconst char *BLOCK_OPER_REGEX = \"^/repo/[\\\\da-z]{8}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{12}/block/[\\\\da-z]{40}\";\nconst char *POST_CHECK_FS_REGEX = \"^/repo/[\\\\da-z]{8}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{12}/check-fs\";\nconst char *POST_CHECK_BLOCK_REGEX = \"^/repo/[\\\\da-z]{8}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{12}/check-blocks\";\nconst char *POST_RECV_FS_REGEX = \"^/repo/[\\\\da-z]{8}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{12}/recv-fs\";\nconst char *POST_PACK_FS_REGEX = \"^/repo/[\\\\da-z]{8}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{12}/pack-fs\";\nconst char *GET_BLOCK_MAP_REGEX = \"^/repo/[\\\\da-z]{8}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{12}/block-map/[\\\\da-z]{40}\";\nconst char *GET_JWT_TOKEN_REGEX = \"^/repo/[\\\\da-z]{8}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{4}-[\\\\da-z]{12}/jwt-token\";\n\n//accessible repos\nconst char *GET_ACCESSIBLE_REPO_LIST_REGEX = \"/accessible-repos\";\n\nstatic void\nload_http_config (HttpServerStruct *htp_server, SeafileSession *session)\n{\n    GError *error = NULL;\n    char *host = NULL;\n    int port = 0;\n    int 
worker_threads;\n    char *encoding;\n    char *cluster_shared_temp_file_mode = NULL;\n    gboolean verify_client_blocks;\n\n    host = fileserver_config_get_string (session->config, HOST, &error);\n    if (!error) {\n        htp_server->bind_addr = host;\n    } else {\n        if (error->code != G_KEY_FILE_ERROR_KEY_NOT_FOUND &&\n            error->code != G_KEY_FILE_ERROR_GROUP_NOT_FOUND) {\n            seaf_warning (\"[conf] Error: failed to read the value of 'host'\\n\");\n            exit (1);\n        }\n\n        htp_server->bind_addr = g_strdup (DEFAULT_BIND_HOST);\n        g_clear_error (&error);\n    }\n\n    port = fileserver_config_get_integer (session->config, PORT, &error);\n    if (!error) {\n        htp_server->bind_port = port;\n    } else {\n        if (error->code != G_KEY_FILE_ERROR_KEY_NOT_FOUND &&\n            error->code != G_KEY_FILE_ERROR_GROUP_NOT_FOUND) {\n            seaf_warning (\"[conf] Error: failed to read the value of 'port'\\n\");\n            exit (1);\n        }\n\n        htp_server->bind_port = DEFAULT_BIND_PORT;\n        g_clear_error (&error);\n    }\n\n    worker_threads = fileserver_config_get_integer (session->config, \"worker_threads\",\n                                                    &error);\n    if (error) {\n        htp_server->worker_threads = DEFAULT_WORKER_THREADS;\n        g_clear_error (&error);\n    } else {\n        if (worker_threads <= 0)\n            htp_server->worker_threads = DEFAULT_WORKER_THREADS;\n        else\n            htp_server->worker_threads = worker_threads;\n    }\n    seaf_message (\"fileserver: worker_threads = %d\\n\", htp_server->worker_threads);\n\n    verify_client_blocks  = fileserver_config_get_boolean (session->config,\n                                                           \"verify_client_blocks_after_sync\",\n                                                           &error);\n    if (error) {\n        htp_server->verify_client_blocks = TRUE;\n        
g_clear_error(&error);\n    } else {\n        htp_server->verify_client_blocks = verify_client_blocks;\n    }\n    seaf_message (\"fileserver: verify_client_blocks = %d\\n\",\n                  htp_server->verify_client_blocks);\n\n    cluster_shared_temp_file_mode = fileserver_config_get_string (session->config,\n                                                                  \"cluster_shared_temp_file_mode\",\n                                                                  &error);\n    if (error) {\n        htp_server->cluster_shared_temp_file_mode = DEFAULT_CLUSTER_SHARED_TEMP_FILE_MODE;\n        g_clear_error (&error);\n    } else {\n        if (!cluster_shared_temp_file_mode) {\n            htp_server->cluster_shared_temp_file_mode = DEFAULT_CLUSTER_SHARED_TEMP_FILE_MODE;\n        } else {\n            htp_server->cluster_shared_temp_file_mode = strtol(cluster_shared_temp_file_mode, NULL, 8);\n\n            if (htp_server->cluster_shared_temp_file_mode < 0001 ||\n                htp_server->cluster_shared_temp_file_mode > 0777)\n                htp_server->cluster_shared_temp_file_mode = DEFAULT_CLUSTER_SHARED_TEMP_FILE_MODE;\n\n            g_free (cluster_shared_temp_file_mode);\n        }\n    }\n    seaf_message (\"fileserver: cluster_shared_temp_file_mode = %o\\n\",\n                  htp_server->cluster_shared_temp_file_mode);\n\n    encoding = g_key_file_get_string (session->config,\n                                      \"zip\", \"windows_encoding\",\n                                      &error);\n    if (encoding) {\n        htp_server->windows_encoding = encoding;\n    } else {\n        g_clear_error (&error);\n        /* No windows specific encoding is specified. Set the ZIP_UTF8 flag. 
*/\n        setlocale (LC_ALL, \"en_US.UTF-8\");\n    }\n}\n\nstatic int\nvalidate_token (HttpServer *htp_server, evhtp_request_t *req,\n                const char *repo_id, char **username,\n                gboolean skip_cache)\n{\n    char *email = NULL;\n    TokenInfo *token_info;\n    char *tmp_token = NULL;\n\n    const char *token = evhtp_kv_find (req->headers_in, \"Seafile-Repo-Token\");\n    if (token == NULL) {\n        const char *auth_token = evhtp_kv_find (req->headers_in, \"Authorization\");\n        tmp_token = seaf_parse_auth_token (auth_token);\n        if (tmp_token == NULL) {\n            evhtp_send_reply (req, EVHTP_RES_BADREQ);\n            return EVHTP_RES_BADREQ;\n        }\n        token = tmp_token;\n    }\n\n    if (!skip_cache) {\n        pthread_mutex_lock (&htp_server->token_cache_lock);\n\n        token_info = g_hash_table_lookup (htp_server->token_cache, token);\n        if (token_info) {\n            if (strcmp (token_info->repo_id, repo_id) != 0) {\n                pthread_mutex_unlock (&htp_server->token_cache_lock);\n                g_free (tmp_token);\n                return EVHTP_RES_FORBIDDEN;\n            }\n\n            if (username)\n                *username = g_strdup(token_info->email);\n            pthread_mutex_unlock (&htp_server->token_cache_lock);\n            g_free (tmp_token);\n            return EVHTP_RES_OK;\n        }\n\n        pthread_mutex_unlock (&htp_server->token_cache_lock);\n    }\n\n    email = seaf_repo_manager_get_email_by_token (seaf->repo_mgr,\n                                                  repo_id, token);\n    if (email == NULL) {\n        pthread_mutex_lock (&htp_server->token_cache_lock);\n        g_hash_table_remove (htp_server->token_cache, token);\n        pthread_mutex_unlock (&htp_server->token_cache_lock);\n        g_free (tmp_token);\n        return EVHTP_RES_FORBIDDEN;\n    }\n\n    token_info = g_new0 (TokenInfo, 1);\n    token_info->repo_id = g_strdup (repo_id);\n    
token_info->expire_time = (gint64)time(NULL) + TOKEN_EXPIRE_TIME;\n    token_info->email = email;\n\n    pthread_mutex_lock (&htp_server->token_cache_lock);\n    g_hash_table_insert (htp_server->token_cache, g_strdup (token), token_info);\n    pthread_mutex_unlock (&htp_server->token_cache_lock);\n\n    if (username)\n        *username = g_strdup(email);\n    g_free (tmp_token);\n    return EVHTP_RES_OK;\n}\n\nstatic PermInfo *\nlookup_perm_cache (HttpServer *htp_server, const char *repo_id, const char *username, const char *op)\n{\n    PermInfo *ret = NULL;\n    PermInfo *perm = NULL;\n    char *key = g_strdup_printf (\"%s:%s:%s\", repo_id, username, op);\n\n    pthread_mutex_lock (&htp_server->perm_cache_lock);\n    ret = g_hash_table_lookup (htp_server->perm_cache, key);\n    if (ret) {\n        perm = g_new0 (PermInfo, 1);\n        perm->expire_time = ret->expire_time;\n    }\n    pthread_mutex_unlock (&htp_server->perm_cache_lock);\n    g_free (key);\n\n    return perm;\n}\n\nstatic char *\nget_auth_token (evhtp_request_t *req)\n{\n    const char *token = evhtp_kv_find (req->headers_in, \"Seafile-Repo-Token\");\n    if (token) {\n        return g_strdup (token);\n    }\n\n    char *tmp_token = NULL;\n    const char *auth_token = evhtp_kv_find (req->headers_in, \"Authorization\");\n    tmp_token = seaf_parse_auth_token (auth_token);\n\n    return tmp_token;\n}\n\nstatic void\ninsert_perm_cache (HttpServer *htp_server,\n                   const char *repo_id, const char *username,\n                   const char *op,\n                   PermInfo *perm)\n{\n    char *key = g_strdup_printf (\"%s:%s:%s\", repo_id, username, op);\n\n    pthread_mutex_lock (&htp_server->perm_cache_lock);\n    g_hash_table_insert (htp_server->perm_cache, key, perm);\n    pthread_mutex_unlock (&htp_server->perm_cache_lock);\n}\n\nstatic void\nremove_perm_cache (HttpServer *htp_server,\n                   const char *repo_id, const char *username,\n                   const char *op)\n{\n 
   char *key = g_strdup_printf (\"%s:%s:%s\", repo_id, username, op);\n\n    pthread_mutex_lock (&htp_server->perm_cache_lock);\n    g_hash_table_remove (htp_server->perm_cache, key);\n    pthread_mutex_unlock (&htp_server->perm_cache_lock);\n\n    g_free (key);\n}\n\nstatic void perm_cache_value_free (gpointer data);\n\nstatic int\ncheck_permission (HttpServer *htp_server, const char *repo_id, const char *username,\n                  const char *op, gboolean skip_cache)\n{\n    PermInfo *perm_info = NULL;\n\n    if (!skip_cache)\n        perm_info = lookup_perm_cache (htp_server, repo_id, username, op);\n\n    if (perm_info) {\n        perm_cache_value_free (perm_info);\n        return EVHTP_RES_OK;\n    }\n\n    remove_perm_cache (htp_server, repo_id, username, op);\n\n    if (strcmp(op, \"upload\") == 0) {\n        int status = seaf_repo_manager_get_repo_status(seaf->repo_mgr, repo_id);\n        if (status != REPO_STATUS_NORMAL && status != -1)\n            return EVHTP_RES_FORBIDDEN;\n    }\n\n    char *perm = seaf_repo_manager_check_permission (seaf->repo_mgr,\n                                                     repo_id, username, NULL);\n    if (perm) {\n        if ((strcmp (perm, \"r\") == 0 && strcmp (op, \"upload\") == 0)) {\n            g_free (perm);\n            return EVHTP_RES_FORBIDDEN;\n        }\n\n        g_free (perm);\n        perm_info = g_new0 (PermInfo, 1);\n        /* Take the reference of perm. */\n        perm_info->expire_time = (gint64)time(NULL) + PERM_EXPIRE_TIME;\n        insert_perm_cache (htp_server, repo_id, username, op, perm_info);\n        return EVHTP_RES_OK;\n    }\n\n    /* Invalidate cache if perm not found in db. 
*/\n    return EVHTP_RES_FORBIDDEN;\n}\n\nstatic gboolean\nget_vir_repo_info (SeafDBRow *row, void *data)\n{\n    const char *repo_id = seaf_db_row_get_column_text (row, 0);\n    if (!repo_id)\n        return FALSE;\n    const char *origin_id = seaf_db_row_get_column_text (row, 1);\n    if (!origin_id)\n        return FALSE;\n\n    VirRepoInfo **vinfo = data;\n    *vinfo = g_new0 (VirRepoInfo, 1);\n    if (!*vinfo)\n        return FALSE;\n    (*vinfo)->store_id = g_strdup (origin_id);\n    if (!(*vinfo)->store_id)\n        return FALSE;\n    (*vinfo)->expire_time = time (NULL) + VIRINFO_EXPIRE_TIME;\n\n    return TRUE;\n}\n\nstatic char *\nget_store_id_from_vir_repo_info_cache (HttpServer *htp_server, const char *repo_id)\n{\n    char *store_id = NULL;\n    VirRepoInfo *vinfo = NULL;\n\n    pthread_mutex_lock (&htp_server->vir_repo_info_cache_lock);\n    vinfo = g_hash_table_lookup (htp_server->vir_repo_info_cache, repo_id);\n\n    if (vinfo) {\n        if (vinfo->store_id)\n            store_id = g_strdup (vinfo->store_id);\n        else\n            store_id = g_strdup (repo_id);\n\n        vinfo->expire_time = time (NULL) + VIRINFO_EXPIRE_TIME;\n    }\n\n    pthread_mutex_unlock (&htp_server->vir_repo_info_cache_lock);\n\n    return store_id;\n}\n\nstatic void\nadd_vir_info_to_cache (HttpServer *htp_server, const char *repo_id,\n                       VirRepoInfo *vinfo)\n{\n    pthread_mutex_lock (&htp_server->vir_repo_info_cache_lock);\n    g_hash_table_insert (htp_server->vir_repo_info_cache, g_strdup (repo_id), vinfo);\n    pthread_mutex_unlock (&htp_server->vir_repo_info_cache_lock);\n}\n\nstatic char *\nget_repo_store_id (HttpServer *htp_server, const char *repo_id)\n{\n    char *store_id = get_store_id_from_vir_repo_info_cache (htp_server,\n                                                            repo_id);\n    if (store_id) {\n        return store_id;\n    }\n\n    VirRepoInfo *vinfo = NULL;\n    char *sql = \"SELECT repo_id, origin_repo FROM 
VirtualRepo where repo_id = ?\";\n    int n_row = seaf_db_statement_foreach_row (seaf->db, sql, get_vir_repo_info,\n                                               &vinfo, 1, \"string\", repo_id);\n    if (n_row < 0) {\n        // db error, return NULL\n        return NULL;\n    } else if (n_row == 0) {\n        // repo is not virtual repo\n        vinfo = g_new0 (VirRepoInfo, 1);\n        if (!vinfo)\n            return NULL;\n        vinfo->expire_time = time (NULL) + VIRINFO_EXPIRE_TIME;\n\n        add_vir_info_to_cache (htp_server, repo_id, vinfo);\n\n        return g_strdup (repo_id);\n    } else if (!vinfo || !vinfo->store_id) {\n        // out of memory, return NULL\n        return NULL;\n    }\n\n    add_vir_info_to_cache (htp_server, repo_id, vinfo);\n\n    return g_strdup (vinfo->store_id);\n}\n\ntypedef struct {\n    char *etype;\n    char *user;\n    char *ip;\n    char repo_id[37];\n    char *path;\n    char *client_name;\n} RepoEventData;\n\n\nstatic void\nfree_repo_event_data (RepoEventData *data)\n{\n    if (!data)\n        return;\n\n    g_free (data->etype);\n    g_free (data->user);\n    g_free (data->ip);\n    g_free (data->path);\n    g_free (data->client_name);\n    g_free (data);\n}\n\nstatic void\nfree_stats_event_data (StatsEventData *data)\n{\n    if (!data)\n        return;\n\n    g_free (data->etype);\n    g_free (data->user);\n    g_free (data->operation);\n    g_free (data);\n}\n\nstatic void\npublish_repo_event (RepoEventData *rdata)\n{\n    json_t *msg = json_object ();\n    char *msg_str = NULL;\n\n    json_object_set_new (msg, \"msg_type\", json_string(rdata->etype));\n    json_object_set_new (msg, \"user_name\", json_string(rdata->user));\n    json_object_set_new (msg, \"ip\", json_string(rdata->ip));\n    if (rdata->client_name) {\n        json_object_set_new (msg, \"user_agent\", json_string(rdata->client_name));\n    } else {\n        json_object_set_new (msg, \"user_agent\", json_string(\"\"));\n    }\n    json_object_set_new 
(msg, \"repo_id\", json_string(rdata->repo_id));\n    if (rdata->path) {\n        json_object_set_new (msg, \"file_path\", json_string(rdata->path));\n    } else {\n        json_object_set_new (msg, \"file_path\", json_string(\"/\"));\n    }\n\n    msg_str = json_dumps (msg, JSON_PRESERVE_ORDER);\n\n    seaf_mq_manager_publish_event (seaf->mq_mgr, SEAFILE_SERVER_CHANNEL_EVENT, msg_str);\n\n    g_free (msg_str);\n    json_decref (msg);\n}\n\nstatic void\npublish_stats_event (StatsEventData *rdata)\n{\n    json_t *msg = json_object ();\n    char *msg_str = NULL;\n\n    json_object_set_new (msg, \"msg_type\", json_string(rdata->etype));\n    json_object_set_new (msg, \"user_name\", json_string(rdata->user));\n    json_object_set_new (msg, \"repo_id\", json_string(rdata->repo_id));\n    json_object_set_new (msg, \"bytes\", json_integer(rdata->bytes));\n\n    msg_str = json_dumps (msg, JSON_PRESERVE_ORDER);\n\n    seaf_mq_manager_publish_event (seaf->mq_mgr, SEAFILE_SERVER_CHANNEL_STATS, msg_str);\n\n    g_free (msg_str);\n    json_decref (msg);\n}\n\nstatic void\non_repo_oper (HttpServer *htp_server, const char *etype,\n              const char *repo_id, char *user, char *ip, char *client_name)\n{\n    RepoEventData *rdata = g_new0 (RepoEventData, 1);\n    SeafVirtRepo *vinfo = seaf_repo_manager_get_virtual_repo_info (seaf->repo_mgr,\n                                                                   repo_id);\n\n    if (vinfo) {\n        memcpy (rdata->repo_id, vinfo->origin_repo_id, 36);\n        rdata->path = g_strdup(vinfo->path);\n    } else\n        memcpy (rdata->repo_id, repo_id, 36);\n    rdata->etype = g_strdup (etype);\n    rdata->user = g_strdup (user);\n    rdata->ip = g_strdup (ip);\n    rdata->client_name = g_strdup(client_name);\n\n    publish_repo_event(rdata);\n    if (vinfo) {\n        g_free (vinfo->path);\n        g_free (vinfo);\n    }\n    free_repo_event_data (rdata);    \n    return;\n}\n\nvoid\nsend_statistic_msg (const char *repo_id, char 
*user, char *operation, guint64 bytes)\n{\n    StatsEventData *rdata = g_new0 (StatsEventData, 1);\n\n    memcpy (rdata->repo_id, repo_id, 36);\n    rdata->etype = g_strdup (operation);\n    rdata->user = g_strdup (user);\n    rdata->bytes = bytes;\n\n    publish_stats_event(rdata);\n\n    free_stats_event_data (rdata);    \n    return;\n}\n\nchar *\nget_client_ip_addr (void *data)\n{\n    evhtp_request_t *req = data;\n    const char *xff = evhtp_kv_find (req->headers_in, \"X-Forwarded-For\");\n    if (xff) {\n        struct in_addr addr;\n        const char *comma = strchr (xff, ',');\n        char *copy;\n        if (comma)\n            copy = g_strndup(xff, comma-xff);\n        else\n            copy = g_strdup(xff);\n        if (evutil_inet_pton (AF_INET, copy, &addr) == 1)\n            return copy;\n        else if (evutil_inet_pton (AF_INET6, copy, &addr) == 1)\n            return copy;\n        g_free (copy);\n    }\n\n    evhtp_connection_t *conn = req->conn;\n    if (conn->saddr->sa_family == AF_INET) {\n        char ip_addr[17];\n        const char *ip = NULL;\n        struct sockaddr_in *addr_in = (struct sockaddr_in *)conn->saddr;\n\n        memset (ip_addr, '\\0', 17);\n        ip = evutil_inet_ntop (AF_INET, &addr_in->sin_addr, ip_addr, 16);\n\n        return g_strdup (ip);\n    }\n\n    char ip_addr[47];\n    const char *ip = NULL;\n    struct sockaddr_in6 *addr_in = (struct sockaddr_in6 *)conn->saddr;\n\n    memset (ip_addr, '\\0', 47);\n    ip = evutil_inet_ntop (AF_INET6, &addr_in->sin6_addr, ip_addr, 46);\n\n    return g_strdup (ip);\n}\n\nstatic int\nvalidate_client_ver (const char *client_ver)\n{\n    char **versions = NULL;\n    char *next_str = NULL;\n\n    versions = g_strsplit (client_ver, \".\", 3);\n    if (g_strv_length (versions) != 3) {\n        g_strfreev (versions);\n        return EVHTP_RES_BADREQ;\n    }\n\n    strtoll (versions[0], &next_str, 10);\n    if (versions[0] == next_str) {\n        g_strfreev (versions);\n        return 
EVHTP_RES_BADREQ;\n    }\n\n    strtoll (versions[1], &next_str, 10);\n    if (versions[1] == next_str) {\n        g_strfreev (versions);\n        return EVHTP_RES_BADREQ;\n    }\n\n    strtoll (versions[2], &next_str, 10);\n    if (versions[2] == next_str) {\n        g_strfreev (versions);\n        return EVHTP_RES_BADREQ;\n    }\n\n    // todo: judge whether version is too old, then return 426\n\n    g_strfreev (versions);\n    return EVHTP_RES_OK;\n}\n\nstatic void\nget_check_permission_cb (evhtp_request_t *req, void *arg)\n{\n    const char *op = evhtp_kv_find (req->uri->query, \"op\");\n    if (op == NULL || (strcmp (op, \"upload\") != 0 && strcmp (op, \"download\") != 0)) {\n        evhtp_send_reply (req, EVHTP_RES_BADREQ);\n        return;\n    }\n\n    const char *client_id = evhtp_kv_find (req->uri->query, \"client_id\");\n    if (client_id && strlen(client_id) != 40) {\n        evhtp_send_reply (req, EVHTP_RES_BADREQ);\n        return;\n    }\n\n    const char *client_ver = evhtp_kv_find (req->uri->query, \"client_ver\");\n    if (client_ver) {\n        int status = validate_client_ver (client_ver);\n        if (status != EVHTP_RES_OK) {\n            evhtp_send_reply (req, status);\n            return;\n        }\n    }\n\n    char *client_name = NULL;\n    const char *client_name_in = evhtp_kv_find (req->uri->query, \"client_name\");\n    if (client_name_in)\n        client_name = g_uri_unescape_string (client_name_in, NULL);\n\n    char **parts = g_strsplit (req->uri->path->full + 1, \"/\", 0);\n    char *repo_id = parts[1];\n    HttpServer *htp_server = seaf->http_server->priv;\n    char *username = NULL;\n    char *ip = NULL;\n    const char *token;\n    SeafRepo *repo = NULL;\n\n    repo = seaf_repo_manager_get_repo_ex (seaf->repo_mgr, repo_id);\n    if (!repo) {\n        evhtp_send_reply (req, SEAF_HTTP_RES_REPO_DELETED);\n        goto out;\n    }\n    if (repo->is_corrupted || repo->repaired) {\n        evhtp_send_reply (req, 
SEAF_HTTP_RES_REPO_CORRUPTED);\n        goto out;\n    }\n\n    int token_status = validate_token (htp_server, req, repo_id, &username, TRUE);\n    if (token_status != EVHTP_RES_OK) {\n        evhtp_send_reply (req, token_status);\n        goto out;\n    }\n\n    /* We shall actually check the permission from database, don't rely on\n     * the cache here.\n     */\n    int perm_status = check_permission (htp_server, repo_id, username, op, TRUE);\n    if (perm_status == EVHTP_RES_FORBIDDEN) {\n        evhtp_send_reply (req, EVHTP_RES_FORBIDDEN);\n        goto out;\n    }\n\n    ip = get_client_ip_addr (req);\n    if (!ip) {\n        evhtp_send_reply (req, EVHTP_RES_SERVERR);\n        token = evhtp_kv_find (req->headers_in, \"Seafile-Repo-Token\");\n        seaf_warning (\"[%s] Failed to get client ip.\\n\", token);\n        goto out;\n    }\n\n    if (strcmp (op, \"download\") == 0) {\n        on_repo_oper (htp_server, \"repo-download-sync\", repo_id, username, ip, client_name);\n    }\n    /* else if (strcmp (op, \"upload\") == 0) { */\n    /*     on_repo_oper (htp_server, \"repo-upload-sync\", repo_id, username, ip, client_name); */\n    /* } */\n\n    if (client_id && client_name) {\n        token = evhtp_kv_find (req->headers_in, \"Seafile-Repo-Token\");\n\n        /* Record the (token, email, <peer info>) information, <peer info> may\n         * include peer_id, peer_ip, peer_name, etc.\n         */\n        if (!seaf_repo_manager_token_peer_info_exists (seaf->repo_mgr, token))\n            seaf_repo_manager_add_token_peer_info (seaf->repo_mgr,\n                                                   token,\n                                                   client_id,\n                                                   ip,\n                                                   client_name,\n                                                   (gint64)time(NULL),\n                                                   client_ver);\n        else\n            
seaf_repo_manager_update_token_peer_info (seaf->repo_mgr,\n                                                      token,\n                                                      ip,\n                                                      (gint64)time(NULL),\n                                                      client_ver);\n    }\n\n    evhtp_send_reply (req, EVHTP_RES_OK);\n\nout:\n    g_free (username);\n    g_strfreev (parts);\n    g_free (ip);\n    g_free (client_name);\n    if (repo) {\n        seaf_repo_unref (repo);\n    }\n}\n\nstatic void\nget_protocol_cb (evhtp_request_t *req, void *arg)\n{\n    evbuffer_add (req->buffer_out, PROTO_VERSION, strlen (PROTO_VERSION));\n    evhtp_send_reply (req, EVHTP_RES_OK);\n}\n\nstatic void\nget_check_quota_cb (evhtp_request_t *req, void *arg)\n{\n    HttpServer *htp_server = seaf->http_server->priv;\n    char **parts = g_strsplit (req->uri->path->full + 1, \"/\", 0);\n    char *repo_id = parts[1];\n\n    int token_status = validate_token (htp_server, req, repo_id, NULL, FALSE);\n    if (token_status != EVHTP_RES_OK) {\n        evhtp_send_reply (req, token_status);\n        goto out;\n    }\n\n    const char *delta = evhtp_kv_find (req->uri->query, \"delta\");\n    if (delta == NULL) {\n        char *error = \"Invalid delta parameter.\\n\";\n        seaf_warning (\"%s\", error);\n        evbuffer_add (req->buffer_out, error, strlen (error));\n        evhtp_send_reply (req, EVHTP_RES_BADREQ);\n        goto out;\n    }\n\n    char *next_ptr = NULL;\n    gint64 delta_num = strtoll(delta, &next_ptr, 10);\n    if (!(*delta != '\\0' && *next_ptr == '\\0')) {\n        char *error = \"Invalid delta parameter.\\n\";\n        seaf_warning (\"%s\", error);\n        evbuffer_add (req->buffer_out, error, strlen (error));\n        evhtp_send_reply (req, EVHTP_RES_BADREQ);\n        goto out;\n    }\n\n    int ret = seaf_quota_manager_check_quota_with_delta (seaf->quota_mgr,\n                                                         
repo_id, delta_num);\n    if (ret < 0) {\n        evhtp_send_reply (req, EVHTP_RES_SERVERR);\n    } else if (ret == 0) {\n        evhtp_send_reply (req, EVHTP_RES_OK);\n    } else {\n        evhtp_send_reply (req, SEAF_HTTP_RES_NOQUOTA);\n    }\n\nout:\n    g_strfreev (parts);\n}\n\nstatic gboolean\nget_branch (SeafDBRow *row, void *vid)\n{\n    char *ret = vid;\n    const char *commit_id;\n\n    commit_id = seaf_db_row_get_column_text (row, 0);\n    memcpy (ret, commit_id, 41);\n\n    return FALSE;\n}\n\nstatic void\nget_head_commit_cb (evhtp_request_t *req, void *arg)\n{\n    HttpServer *htp_server = seaf->http_server->priv;\n    char **parts = g_strsplit (req->uri->path->full + 1, \"/\", 0);\n    char *repo_id = parts[1];\n    gboolean db_err = FALSE, exists = TRUE;\n    int token_status;\n    char commit_id[41];\n    char *sql;\n\n    sql = \"SELECT 1 FROM Repo WHERE repo_id=?\";\n    exists = seaf_db_statement_exists (seaf->db, sql, &db_err, 1, \"string\", repo_id);\n    if (!exists) {\n        if (db_err) {\n            seaf_warning (\"DB error when check repo existence.\\n\");\n            evbuffer_add_printf (req->buffer_out,\n                                 \"{\\\"is_corrupted\\\": 1}\");\n            evhtp_send_reply (req, EVHTP_RES_OK);\n            goto out;\n        }\n        evhtp_send_reply (req, SEAF_HTTP_RES_REPO_DELETED);\n        goto out;\n    }\n\n    token_status = validate_token (htp_server, req, repo_id, NULL, FALSE);\n    if (token_status != EVHTP_RES_OK) {\n        evhtp_send_reply (req, token_status);\n        goto out;\n    }\n\n    commit_id[0] = 0;\n\n    sql = \"SELECT commit_id FROM Branch WHERE name='master' AND repo_id=?\";\n    if (seaf_db_statement_foreach_row (seaf->db, sql,\n                                       get_branch, commit_id,\n                                       1, \"string\", repo_id) < 0) {\n        seaf_warning (\"DB error when get branch master.\\n\");\n        evbuffer_add_printf (req->buffer_out,\n          
                   \"{\\\"is_corrupted\\\": 1}\");\n        evhtp_send_reply (req, EVHTP_RES_OK);\n        goto out;\n    }\n\n    if (commit_id[0] == 0) {\n        evhtp_send_reply (req, SEAF_HTTP_RES_REPO_DELETED);\n        goto out;\n    }\n\n    evbuffer_add_printf (req->buffer_out,\n                         \"{\\\"is_corrupted\\\": 0, \\\"head_commit_id\\\": \\\"%s\\\"}\",\n                         commit_id);\n    evhtp_send_reply (req, EVHTP_RES_OK);\n\nout:\n    g_strfreev (parts);\n}\n\nstatic char *\ngen_merge_description (SeafRepo *repo,\n                       const char *merged_root,\n                       const char *p1_root,\n                       const char *p2_root)\n{\n    GList *p;\n    GList *results = NULL;\n    char *desc;\n\n    diff_merge_roots (repo->store_id, repo->version,\n                      merged_root, p1_root, p2_root, &results, TRUE);\n\n    desc = diff_results_to_description (results);\n\n    for (p = results; p; p = p->next) {\n        DiffEntry *de = p->data;\n        diff_entry_free (de);\n    }\n    g_list_free (results);\n\n    return desc;\n}\n\nstatic int\nfast_forward_or_merge (const char *repo_id,\n                       SeafCommit *base,\n                       SeafCommit *new_commit,\n                       const char *token,\n                       gboolean *is_gc_conflict)\n{\n#define MAX_RETRY_COUNT 3\n\n    SeafRepo *repo = NULL;\n    SeafCommit *current_head = NULL, *merged_commit = NULL;\n    int retry_cnt = 0;\n    int ret = 0;\n    char *last_gc_id = NULL;\n    gboolean check_gc;\n    gboolean gc_conflict = FALSE;\n\n    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);\n    if (!repo) {\n        seaf_warning (\"Repo %s doesn't exist.\\n\", repo_id);\n        ret = -1;\n        goto out;\n    }\n\n    /* In some uploads, no blocks need to be uploaded. For example, deleting\n     * a file or folder. In such cases, checkbl won't be called.\n     * So the last gc id is not inserted to the database. 
We don't need to\n     * check gc for these cases since no new blocks are uploaded.\n     *\n     * Note that having a 'NULL' gc id in database is not the same as not having\n     * a last gc id record. The former one indicates that, before block upload,\n     * no GC has been performed; the latter one indicates no _new_ blocks are\n     * being referenced by this new commit.\n     */\n    if (seaf_db_type(seaf->db) == SEAF_DB_TYPE_SQLITE)\n        check_gc = FALSE;\n    else\n        check_gc = seaf_repo_has_last_gc_id (repo, token);\n\n    if (check_gc) {\n        last_gc_id = seaf_repo_get_last_gc_id (repo, token);\n        seaf_repo_remove_last_gc_id (repo, token);\n    }\n\nretry:\n    current_head = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                                                   repo->id, repo->version,\n                                                   repo->head->commit_id);\n    if (!current_head) {\n        seaf_warning (\"Failed to find head commit of %s.\\n\", repo_id);\n        ret = -1;\n        goto out;\n    }\n\n    /* Merge if base and head are not the same. 
*/\n    if (strcmp (base->commit_id, current_head->commit_id) != 0) {\n        MergeOptions opt;\n        const char *roots[3];\n        char *desc = NULL;\n\n        memset (&opt, 0, sizeof(opt));\n        opt.n_ways = 3;\n        memcpy (opt.remote_repo_id, repo_id, 36);\n        memcpy (opt.remote_head, new_commit->commit_id, 40);\n        opt.do_merge = TRUE;\n\n        roots[0] = base->root_id; /* base */\n        roots[1] = current_head->root_id; /* head */\n        roots[2] = new_commit->root_id;      /* remote */\n\n        if (seaf_merge_trees (repo->store_id, repo->version, 3, roots, &opt) < 0) {\n            seaf_warning (\"Failed to merge.\\n\");\n            ret = -1;\n            goto out;\n        }\n\n        if (!opt.conflict)\n            desc = g_strdup(\"Auto merge by system\");\n        else {\n            desc = gen_merge_description (repo,\n                                          opt.merged_tree_root,\n                                          current_head->root_id,\n                                          new_commit->root_id);\n            if (!desc)\n                desc = g_strdup(\"Auto merge by system\");\n        }\n\n        merged_commit = seaf_commit_new(NULL, repo->id, opt.merged_tree_root,\n                                        new_commit->creator_name, EMPTY_SHA1,\n                                        desc,\n                                        0);\n        g_free (desc);\n\n        merged_commit->parent_id = g_strdup (current_head->commit_id);\n        merged_commit->second_parent_id = g_strdup (new_commit->commit_id);\n        merged_commit->new_merge = TRUE;\n        if (opt.conflict)\n            merged_commit->conflict = TRUE;\n        seaf_repo_to_commit (repo, merged_commit);\n\n        if (seaf_commit_manager_add_commit (seaf->commit_mgr, merged_commit) < 0) {\n            seaf_warning (\"Failed to add commit.\\n\");\n            ret = -1;\n            goto out;\n        }\n    } else {\n        seaf_commit_ref 
(new_commit);\n        merged_commit = new_commit;\n    }\n\n    seaf_branch_set_commit(repo->head, merged_commit->commit_id);\n\n    gc_conflict = FALSE;\n\n    if (seaf_branch_manager_test_and_update_branch(seaf->branch_mgr,\n                                                   repo->head,\n                                                   current_head->commit_id,\n                                                   check_gc, last_gc_id,\n                                                   repo->store_id,\n                                                   &gc_conflict) < 0)\n    {\n        if (gc_conflict) {\n            if (is_gc_conflict) {\n                *is_gc_conflict = TRUE;\n            }\n            seaf_warning (\"Head branch update for repo %s conflicts with GC.\\n\",\n                          repo_id);\n            ret = -1;\n            goto out;\n        }\n\n        seaf_repo_unref (repo);\n        repo = NULL;\n        seaf_commit_unref (current_head);\n        current_head = NULL;\n        seaf_commit_unref (merged_commit);\n        merged_commit = NULL;\n\n        if (++retry_cnt <= MAX_RETRY_COUNT) {\n            /* Sleep random time between 100 and 1000 millisecs. 
*/\n            usleep (g_random_int_range(1, 11) * 100 * 1000);\n\n            repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);\n            if (!repo) {\n                seaf_warning (\"Repo %s doesn't exist.\\n\", repo_id);\n                ret = -1;\n                goto out;\n            }\n\n            goto retry;\n        } else {\n            ret = -1;\n            goto out;\n        }\n    }\n\nout:\n    g_free (last_gc_id);\n    seaf_commit_unref (current_head);\n    seaf_commit_unref (merged_commit);\n    seaf_repo_unref (repo);\n    return ret;\n}\n\ntypedef struct CheckBlockAux {\n    GList *file_list;\n    const char *store_id;\n    int version;\n} CheckBlockAux;\n\nstatic int\ncheck_file_blocks (int n, const char *basedir, SeafDirent *files[], void *data)\n{\n    Seafile *file = NULL;\n    char *block_id;\n    int i = 0;\n    SeafDirent *file1 = files[0];\n    SeafDirent *file2 = files[1];\n    CheckBlockAux *aux = (CheckBlockAux*)data;\n\n    if (!file2 || strcmp (file2->id, EMPTY_SHA1) == 0 || (file1 && strcmp (file1->id, file2->id) == 0)) {\n        return 0;\n    }\n\n    file = seaf_fs_manager_get_seafile (seaf->fs_mgr, aux->store_id, aux->version, file2->id);\n    if (!file) {\n        return -1;\n    }\n\n    for (i = 0; i < file->n_blocks; ++i) {\n        block_id = file->blk_sha1s[i];\n        if (!seaf_block_manager_block_exists (seaf->block_mgr, aux->store_id, aux->version, block_id)) {\n            aux->file_list = g_list_prepend (aux->file_list, g_strdup (file2->name));\n            goto out;\n        }\n    }\n\nout:\n    seafile_unref (file);\n    return 0;\n}\n\nstatic int\ncheck_dir_cb (int n, const char *basedir, SeafDirent *dirs[], void *data,\n              gboolean *recurse)\n{\n    SeafDirent *dir1 = dirs[0];\n    SeafDirent *dir2 = dirs[1];\n\n    if (!dir1) {\n        // if dir2 is empty, stop diff.\n        if (g_strcmp0 (dir2->id, EMPTY_SHA1) == 0) {\n            *recurse = FALSE;\n        } else {\n            
*recurse = TRUE;\n        }\n        return 0;\n    }\n\n    // if dir2 is not exist, stop diff.\n    if (!dir2) {\n        *recurse = FALSE;\n        return 0;\n    }\n\n    // if dir1 and dir2 are the same or dir2 is empty, stop diff.\n    if (g_strcmp0 (dir1->id, dir2->id) == 0 || g_strcmp0 (dir2->id, EMPTY_SHA1) == 0) {\n        *recurse = FALSE;\n        return 0;\n    }\n\n    return 0;\n}\n\nstatic int\ncheck_blocks (SeafRepo *repo, SeafCommit *base, SeafCommit *remote, char **ret_body) {\n    DiffOptions opts;\n    memset (&opts, 0, sizeof(opts));\n    memcpy (opts.store_id, repo->store_id, 36);\n    opts.version = repo->version;\n\n    opts.file_cb = check_file_blocks;\n    opts.dir_cb = check_dir_cb;\n\n    CheckBlockAux aux;\n    memset (&aux, 0, sizeof(aux));\n    aux.store_id = repo->store_id;\n    aux.version = repo->version;\n    opts.data = &aux;\n\n    const char *trees[2];\n    trees[0] = base->root_id;\n    trees[1] = remote->root_id;\n\n    if (diff_trees (2, trees, &opts) < 0) {\n        seaf_warning (\"Failed to diff base and remote head for repo %.8s.\\n\",\n                      repo->id);\n        return -1;\n    }\n\n    if (!aux.file_list) {\n        return 0;\n    }\n\n    json_t *obj_array = json_array ();\n    GList *ptr;\n    for (ptr = aux.file_list; ptr; ptr = ptr->next) {\n        json_array_append_new (obj_array, json_string (ptr->data));\n        g_free (ptr->data);\n    }\n    g_list_free (aux.file_list);\n\n    *ret_body = json_dumps (obj_array, JSON_COMPACT);\n    json_decref (obj_array);\n\n    return -1;\n}\n\ngboolean\nshould_ignore (const char *filename)\n{\n    char **components = g_strsplit (filename, \"/\", -1);\n    int n_comps = g_strv_length (components);\n    int j = 0;\n    char *file_name;\n\n    for (; j < n_comps; ++j) {\n        file_name = components[j];\n        if (g_strcmp0(file_name, \"..\") == 0) {\n            g_strfreev (components);\n            return TRUE;\n        }\n    }\n    g_strfreev 
(components);\n\n    return FALSE;\n}\n\nstatic gboolean\ninclude_invalid_path (SeafCommit *base_commit, SeafCommit *new_commit) {\n    GList *diff_entries = NULL;\n    gboolean ret = FALSE;\n\n    int rc = diff_commits (base_commit, new_commit, &diff_entries, TRUE);\n    if (rc < 0) {\n        seaf_warning (\"Failed to check invalid path.\\n\");\n        return FALSE;\n    }\n\n    GList *ptr;\n    DiffEntry *diff_entry;\n    for (ptr = diff_entries; ptr; ptr = ptr->next) {\n        diff_entry = ptr->data;\n        if (diff_entry->new_name) {\n            if (should_ignore(diff_entry->new_name)) {\n                ret = TRUE;\n                break;\n            }\n        } else {\n            if (should_ignore(diff_entry->name)) {\n                ret = TRUE;\n                break;\n            }\n        }\n    }\n\n    return ret;\n}\n\nstatic void\nput_update_branch_cb (evhtp_request_t *req, void *arg)\n{\n    HttpServer *htp_server = seaf->http_server->priv;\n    char **parts;\n    char *repo_id;\n    char *username = NULL;\n    SeafRepo *repo = NULL;\n    SeafCommit *new_commit = NULL, *base = NULL;\n    char *token = NULL;\n\n    const char *new_commit_id = evhtp_kv_find (req->uri->query, \"head\");\n    if (new_commit_id == NULL || !is_object_id_valid (new_commit_id)) {\n        evhtp_send_reply (req, EVHTP_RES_BADREQ);\n        return;\n    }\n\n    parts = g_strsplit (req->uri->path->full + 1, \"/\", 0);\n    repo_id = parts[1];\n\n    int token_status = validate_token (htp_server, req, repo_id, &username, FALSE);\n    if (token_status != EVHTP_RES_OK) {\n        evhtp_send_reply (req, token_status);\n        goto out;\n    }\n\n    int perm_status = check_permission (htp_server, repo_id, username,\n                                        \"upload\", FALSE);\n    if (perm_status == EVHTP_RES_FORBIDDEN) {\n        evhtp_send_reply (req, EVHTP_RES_FORBIDDEN);\n        goto out;\n    }\n\n    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);\n  
  if (!repo) {\n        seaf_warning (\"Repo %s is missing or corrupted.\\n\", repo_id);\n        evhtp_send_reply (req, EVHTP_RES_SERVERR);\n        goto out;\n    }\n\n    /* Since this is the last step of upload procedure, commit should exist. */\n    new_commit = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                                                 repo->id, repo->version,\n                                                 new_commit_id);\n    if (!new_commit) {\n        evhtp_send_reply (req, EVHTP_RES_SERVERR);\n        goto out;\n    }\n\n    base = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                                           repo->id, repo->version,\n                                           new_commit->parent_id);\n    if (!base) {\n        evhtp_send_reply (req, EVHTP_RES_BADREQ);\n        goto out;\n    }\n\n    if (include_invalid_path (base, new_commit)) {\n        evhtp_send_reply (req, EVHTP_RES_BADREQ);\n        goto out;\n    }\n\n    if (seaf_quota_manager_check_quota (seaf->quota_mgr, repo_id) < 0) {\n        evhtp_send_reply (req, SEAF_HTTP_RES_NOQUOTA);\n        goto out;\n    }\n\n    token = get_auth_token (req);\n\n    if (seaf->http_server->verify_client_blocks) {\n        char *ret_body = NULL;\n        int rc = check_blocks(repo, base, new_commit, &ret_body);\n        if (rc < 0) {\n            if (ret_body) {\n                evbuffer_add (req->buffer_out, ret_body, strlen (ret_body));\n            }\n            evhtp_send_reply (req, SEAF_HTTP_RES_BLOCK_MISSING);\n            g_free (ret_body);\n            goto out;\n        }\n    }\n\n    gboolean gc_conflict = FALSE;\n    if (fast_forward_or_merge (repo_id, base, new_commit, token, &gc_conflict) < 0) {\n        if (gc_conflict) {\n            char *msg = \"GC Conflict.\\n\";\n            evbuffer_add (req->buffer_out, msg, strlen (msg));\n            evhtp_send_reply (req, EVHTP_RES_CONFLICT);\n        } else {\n            seaf_warning (\"Fast forward 
merge for repo %s is failed.\\n\", repo_id);\n            evhtp_send_reply (req, EVHTP_RES_SERVERR);\n        }\n        goto out;\n    }\n\n    seaf_repo_manager_merge_virtual_repo (seaf->repo_mgr, repo_id, NULL);\n\n    schedule_repo_size_computation (seaf->size_sched, repo_id);\n\n    evhtp_send_reply (req, EVHTP_RES_OK);\n\nout:\n    g_free (token);\n    seaf_repo_unref (repo);\n    seaf_commit_unref (new_commit);\n    seaf_commit_unref (base);\n    g_free (username);\n    g_strfreev (parts);\n}\n\nstatic void\nhead_commit_oper_cb (evhtp_request_t *req, void *arg)\n{\n   htp_method req_method = evhtp_request_get_method (req);\n\n   if (req_method == htp_method_GET) {\n       get_head_commit_cb (req, arg);\n   } else if (req_method == htp_method_PUT) {\n       put_update_branch_cb (req, arg);\n   }\n}\n\nstatic gboolean\ncollect_head_commit_ids (SeafDBRow *row, void *data)\n{\n    json_t *map = (json_t *)data;\n    const char *repo_id = seaf_db_row_get_column_text (row, 0);\n    const char *commit_id = seaf_db_row_get_column_text (row, 1);\n\n    json_object_set_new (map, repo_id, json_string(commit_id));\n\n    return TRUE;\n}\n\nstatic void\nhead_commits_multi_cb (evhtp_request_t *req, void *arg)\n{\n    size_t list_len;\n    json_t *repo_id_array = NULL;\n    size_t n, i;\n    GString *id_list_str = NULL;\n    char *sql = NULL;\n    json_t *commit_id_map = NULL;\n    char *data = NULL;\n\n    list_len = evbuffer_get_length (req->buffer_in);\n    if (list_len == 0) {\n        evhtp_send_reply (req, EVHTP_RES_BADREQ);\n        goto out;\n    }\n\n    char *repo_id_list_con = g_new0 (char, list_len);\n    if (!repo_id_list_con) {\n        evhtp_send_reply (req, EVHTP_RES_SERVERR);\n        seaf_warning (\"Failed to allocate %lu bytes memory.\\n\", list_len);\n        goto out;\n    }\n\n    json_error_t jerror;\n    evbuffer_remove (req->buffer_in, repo_id_list_con, list_len);\n    repo_id_array = json_loadb (repo_id_list_con, list_len, 0, &jerror);\n    g_free 
(repo_id_list_con);\n\n    if (!repo_id_array) {\n        seaf_warning (\"load repo_id_list to json failed, error: %s\\n\", jerror.text);\n        evhtp_send_reply (req, EVHTP_RES_BADREQ);\n        goto out;\n    }\n\n    n = json_array_size (repo_id_array);\n    if (n == 0) {\n        evhtp_send_reply (req, EVHTP_RES_BADREQ);\n        goto out;\n    }\n\n    json_t *id;\n    id_list_str = g_string_new (\"\");\n    for (i = 0; i < n; ++i) {\n        id = json_array_get (repo_id_array, i);\n        if (json_typeof(id) != JSON_STRING) {\n            evhtp_send_reply (req, EVHTP_RES_BADREQ);\n            goto out;\n        }\n        /* Make sure ids are in UUID format. */\n        if (!is_uuid_valid (json_string_value (id))) {\n            evhtp_send_reply (req, EVHTP_RES_BADREQ);\n            goto out;\n        }\n        if (i == 0)\n            g_string_append_printf (id_list_str, \"'%s'\", json_string_value(id));\n        else\n            g_string_append_printf (id_list_str, \",'%s'\", json_string_value(id));\n    }\n\n    if (seaf_db_type (seaf->db) == SEAF_DB_TYPE_MYSQL)\n        sql = g_strdup_printf (\"SELECT repo_id, commit_id FROM Branch WHERE name='master' AND repo_id IN (%s) LOCK IN SHARE MODE\",\n                                id_list_str->str);\n    else\n        sql = g_strdup_printf (\"SELECT repo_id, commit_id FROM Branch WHERE name='master' AND repo_id IN (%s)\",\n                                id_list_str->str);\n    commit_id_map = json_object();\n    if (seaf_db_statement_foreach_row (seaf->db, sql,\n                                       collect_head_commit_ids, commit_id_map, 0) < 0) {\n        evhtp_send_reply (req, EVHTP_RES_SERVERR);\n        goto out;\n    }\n\n    data = json_dumps (commit_id_map, JSON_COMPACT);\n    if (!data) {\n        seaf_warning (\"failed to dump json.\\n\");\n        evhtp_send_reply (req, EVHTP_RES_SERVERR);\n        goto out;\n    }\n\n    evbuffer_add (req->buffer_out, data, strlen(data));\n    
evhtp_send_reply (req, EVHTP_RES_OK);\n\nout:\n    if (repo_id_array)\n        json_decref (repo_id_array);\n    if (id_list_str)\n        g_string_free (id_list_str, TRUE);\n    g_free (sql);\n    if (commit_id_map)\n        json_decref (commit_id_map);\n    if (data)\n        free (data);\n}\n\nstatic void\nget_commit_info_cb (evhtp_request_t *req, void *arg)\n{\n    HttpServer *htp_server = seaf->http_server->priv;\n    char **parts = g_strsplit (req->uri->path->full + 1, \"/\", 0);\n    char *repo_id = parts[1];\n    char *commit_id = parts[3];\n    char *username = NULL;\n\n    int token_status = validate_token (htp_server, req, repo_id, &username, FALSE);\n    if (token_status != EVHTP_RES_OK) {\n        evhtp_send_reply (req, token_status);\n        goto out;\n    }\n\n    int perm_status = check_permission (htp_server, repo_id, username,\n                                        \"download\", FALSE);\n    if (perm_status != EVHTP_RES_OK) {\n        evhtp_send_reply (req, EVHTP_RES_FORBIDDEN);\n        goto out;\n    }\n\n    char *data = NULL;\n    int len;\n\n    int ret = seaf_obj_store_read_obj (seaf->commit_mgr->obj_store, repo_id, 1,\n                                       commit_id, (void **)&data, &len);\n    if (ret < 0) {\n        seaf_warning (\"Get commit info failed: commit %s is missing.\\n\", commit_id);\n        evhtp_send_reply (req, EVHTP_RES_NOTFOUND);\n        goto out;\n    }\n\n    evbuffer_add (req->buffer_out, data, len);\n    evhtp_send_reply (req, EVHTP_RES_OK);\n    g_free (data);\n\nout:\n    g_free (username);\n    g_strfreev (parts);\n}\n\nstatic int\nsave_last_gc_id (const char *repo_id, const char *token)\n{\n    SeafRepo *repo;\n    char *gc_id;\n\n    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);\n    if (!repo) {\n        seaf_warning (\"Failed to find repo %s.\\n\", repo_id);\n        return -1;\n    }\n\n    gc_id = seaf_repo_get_current_gc_id (repo);\n\n    seaf_repo_set_last_gc_id (repo, token, gc_id);\n\n 
   g_free (gc_id);\n    seaf_repo_unref (repo);\n\n    return 0;\n}\n\nstatic void\nput_commit_cb (evhtp_request_t *req, void *arg)\n{\n    HttpServer *htp_server = seaf->http_server->priv;\n    char **parts = g_strsplit (req->uri->path->full + 1, \"/\", 0);\n    char *repo_id = parts[1];\n    char *commit_id = parts[3];\n    char *username = NULL;\n    void *data = NULL;\n\n    int token_status = validate_token (htp_server, req, repo_id, &username, FALSE);\n    if (token_status != EVHTP_RES_OK) {\n        evhtp_send_reply (req, token_status);\n        goto out;\n    }\n\n    int perm_status = check_permission (htp_server, repo_id, username,\n                                        \"upload\", FALSE);\n    if (perm_status == EVHTP_RES_FORBIDDEN) {\n        evhtp_send_reply (req, EVHTP_RES_FORBIDDEN);\n        goto out;\n    }\n\n    int con_len = evbuffer_get_length (req->buffer_in);\n    if(con_len == 0) {\n        evhtp_send_reply (req, EVHTP_RES_BADREQ);\n        goto out;\n    }\n\n    data = g_new0 (char, con_len);\n    if (!data) {\n        evhtp_send_reply (req, EVHTP_RES_SERVERR);\n        seaf_warning (\"Failed to allocate %d bytes memory.\\n\", con_len);\n        goto out;\n    }\n\n    evbuffer_remove (req->buffer_in, data, con_len);\n    SeafCommit *commit = seaf_commit_from_data (commit_id, (char *)data, con_len);\n    if (!commit) {\n        evhtp_send_reply (req, EVHTP_RES_BADREQ);\n        goto out;\n    }\n\n    if (strcmp (commit->repo_id, repo_id) != 0) {\n        evhtp_send_reply (req, EVHTP_RES_BADREQ);\n        goto out;\n    }\n\n    if (seaf_commit_manager_add_commit (seaf->commit_mgr, commit) < 0) {\n        evhtp_send_reply (req, EVHTP_RES_SERVERR);\n    } else {\n        /* Last GCID must be set before checking blocks. However, in http sync,\n         * block list may be sent in multiple http requests. 
There is no way to\n         * tell which one is the first check block request.\n         * \n         * So we set the last GCID just before replying to upload commit\n         * request. One consequence is that even if the following upload\n         * doesn't upload new blocks, we still need to check gc conflict in\n         * update-branch request. Since gc conflict is a rare case, this solution\n         * won't introduce many more gc conflicts.\n         */\n        char *token = get_auth_token (req);\n        if (save_last_gc_id (repo_id, token) < 0) {\n            evhtp_send_reply (req, EVHTP_RES_SERVERR);\n        } else\n            evhtp_send_reply (req, EVHTP_RES_OK);\n        g_free (token);\n    }\n    seaf_commit_unref (commit);\n\nout:\n    g_free (username);\n    g_free (data);\n    g_strfreev (parts);\n}\n\nstatic void\ncommit_oper_cb (evhtp_request_t *req, void *arg)\n{\n    htp_method req_method = evhtp_request_get_method (req);\n\n    if (req_method == htp_method_PUT) {\n        put_commit_cb (req, arg);\n    } else if (req_method == htp_method_GET) {\n        get_commit_info_cb (req, arg);\n    }\n}\n\nstatic int\ncollect_file_ids (int n, const char *basedir, SeafDirent *files[], void *data)\n{\n    SeafDirent *file1 = files[0];\n    SeafDirent *file2 = files[1];\n    GList **pret = data;\n\n    if (file1 && (!file2 || strcmp(file1->id, file2->id) != 0) &&\n        strcmp (file1->id, EMPTY_SHA1) != 0)\n        *pret = g_list_prepend (*pret, g_strdup(file1->id));\n\n    return 0;\n}\n\nstatic int\ncollect_file_ids_nop (int n, const char *basedir, SeafDirent *files[], void *data)\n{\n    return 0;\n}\n\nstatic int\ncollect_dir_ids (int n, const char *basedir, SeafDirent *dirs[], void *data,\n                 gboolean *recurse)\n{\n    SeafDirent *dir1 = dirs[0];\n    SeafDirent *dir2 = dirs[1];\n    GList **pret = data;\n\n    if (dir1 && (!dir2 || strcmp(dir1->id, dir2->id) != 0) &&\n        strcmp (dir1->id, EMPTY_SHA1) != 0)\n        *pret = 
g_list_prepend (*pret, g_strdup(dir1->id));\n\n    return 0;\n}\n\nstatic int\ncalculate_send_object_list (SeafRepo *repo,\n                            const char *server_head,\n                            const char *client_head,\n                            gboolean dir_only,\n                            GList **results)\n{\n    SeafCommit *remote_head = NULL, *master_head = NULL;\n    char *remote_head_root;\n    int ret = 0;\n\n    *results = NULL;\n\n    master_head = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                                                  repo->id, repo->version,\n                                                  server_head);\n    if (!master_head) {\n        seaf_warning (\"Server head commit %s:%s not found.\\n\", repo->id, server_head);\n        return -1;\n    }\n\n    if (client_head) {\n        remote_head = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                                                      repo->id, repo->version,\n                                                      client_head);\n        if (!remote_head) {\n            ret = -1;\n            goto out;\n        }\n        remote_head_root = remote_head->root_id;\n    } else\n        remote_head_root = EMPTY_SHA1;\n\n    /* Diff won't traverse the root object itself. 
*/\n    if (strcmp (remote_head_root, master_head->root_id) != 0 &&\n        strcmp (master_head->root_id, EMPTY_SHA1) != 0)\n        *results = g_list_prepend (*results, g_strdup(master_head->root_id));\n\n    DiffOptions opts;\n    memset (&opts, 0, sizeof(opts));\n    memcpy (opts.store_id, repo->store_id, 36);\n    opts.version = repo->version;\n    if (!dir_only)\n        opts.file_cb = collect_file_ids;\n    else\n        opts.file_cb = collect_file_ids_nop;\n    opts.dir_cb = collect_dir_ids;\n    opts.data = results;\n\n    const char *trees[2];\n    trees[0] = master_head->root_id;\n    trees[1] = remote_head_root;\n    if (diff_trees (2, trees, &opts) < 0) {\n        seaf_warning (\"Failed to diff remote and master head for repo %.8s.\\n\",\n                      repo->id);\n        string_list_free (*results);\n        ret = -1;\n    }\n\nout:\n    seaf_commit_unref (remote_head);\n    seaf_commit_unref (master_head);\n    return ret;\n}\n\nstatic void\nget_fs_obj_id_cb (evhtp_request_t *req, void *arg)\n{\n    HttpServer *htp_server = seaf->http_server->priv;\n    char **parts;\n    char *repo_id;\n    SeafRepo *repo = NULL;\n    gboolean dir_only = FALSE;\n    char *username = NULL;\n\n    const char *server_head = evhtp_kv_find (req->uri->query, \"server-head\");\n    if (server_head == NULL || !is_object_id_valid (server_head)) {\n        char *error = \"Invalid server-head parameter.\\n\";\n        seaf_warning (\"%s\", error);\n        evbuffer_add (req->buffer_out, error, strlen (error));\n        evhtp_send_reply (req, EVHTP_RES_BADREQ);\n        return;\n    }\n\n    const char *client_head = evhtp_kv_find (req->uri->query, \"client-head\");\n    if (client_head && !is_object_id_valid (client_head)) {\n        char *error = \"Invalid client-head parameter.\\n\";\n        seaf_warning (\"%s\", error);\n        evbuffer_add (req->buffer_out, error, strlen (error));\n        evhtp_send_reply (req, EVHTP_RES_BADREQ);\n        return;\n    }\n\n    
const char *dir_only_arg = evhtp_kv_find (req->uri->query, \"dir-only\");\n    if (dir_only_arg)\n        dir_only = TRUE;\n\n    parts = g_strsplit (req->uri->path->full + 1, \"/\", 0);\n    repo_id = parts[1];\n\n    int token_status = validate_token (htp_server, req, repo_id, &username, FALSE);\n    if (token_status != EVHTP_RES_OK) {\n        evhtp_send_reply (req, token_status);\n        goto out;\n    }\n\n    int perm_status = check_permission (htp_server, repo_id, username,\n                                        \"download\", FALSE);\n    if (perm_status != EVHTP_RES_OK) {\n        evhtp_send_reply (req, EVHTP_RES_FORBIDDEN);\n        goto out;\n    }\n\n    GList *list = NULL, *ptr;\n\n    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);\n    if (!repo) {\n        seaf_warning (\"Failed to find repo %.8s.\\n\", repo_id);\n        evhtp_send_reply (req, EVHTP_RES_SERVERR);\n        goto out;\n    }\n\n    if (calculate_send_object_list (repo, server_head, client_head, dir_only, &list) < 0) {\n        evhtp_send_reply (req, EVHTP_RES_SERVERR);\n        goto out;\n    }\n\n    json_t *obj_array = json_array ();\n\n    for (ptr = list; ptr; ptr = ptr->next) {\n        json_array_append_new (obj_array, json_string (ptr->data));\n        g_free (ptr->data);\n    }\n    g_list_free (list);\n\n    char *obj_list = json_dumps (obj_array, JSON_COMPACT);\n    evbuffer_add (req->buffer_out, obj_list, strlen (obj_list));\n    evhtp_send_reply (req, EVHTP_RES_OK);\n\n    g_free (obj_list);\n    json_decref (obj_array);\n\nout:\n    g_free (username);\n    g_strfreev (parts);\n    seaf_repo_unref (repo);\n}\n\ntypedef struct ComputeObjTask {\n    HttpServer *htp_server;\n    char *token;\n    char *repo_id;\n    char *client_head;\n    char *server_head;\n    gboolean dir_only;\n} ComputeObjTask;\n\ntypedef struct CalObjResult {\n    GList *list;\n    gboolean done;\n} CalObjResult;\n\nstatic void\nfree_compute_obj_task(ComputeObjTask *task)\n{\n    if 
(!task)\n        return;\n\n    if (task->token)\n        g_free(task->token);\n    if (task->repo_id)\n        g_free(task->repo_id);\n    if (task->client_head)\n        g_free(task->client_head);\n    if (task->server_head)\n        g_free(task->server_head);\n    g_free(task);\n}\n\nstatic void\nfree_obj_cal_result (gpointer data)\n{\n    CalObjResult *result = (CalObjResult *)data;\n    if (!result)\n        return;\n\n    if (result->list)\n        g_list_free (result->list);\n\n    g_free(result);\n}\n\nstatic void\ncompute_fs_obj_id (gpointer ptask, gpointer ppara)\n{\n    SeafRepo *repo = NULL;\n    ComputeObjTask *task = ptask;\n    const char *client_head = task->client_head;\n    const char *server_head = task->server_head;\n    char *repo_id = task->repo_id;\n    gboolean dir_only = task->dir_only;\n    HttpServer *htp_server = task->htp_server;\n    CalObjResult *result = NULL;\n\n    pthread_mutex_lock (&htp_server->fs_obj_ids_lock);\n    result = g_hash_table_lookup (htp_server->fs_obj_ids, task->token);\n    pthread_mutex_unlock (&htp_server->fs_obj_ids_lock);\n    if (!result) {\n        goto out;\n    }\n\n    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);\n    if (!repo) {\n        seaf_warning (\"Failed to find repo %.8s.\\n\", repo_id);\n        goto out;\n    }\n\n    if (calculate_send_object_list (repo, server_head, client_head, dir_only, &result->list) < 0) {\n        pthread_mutex_lock (&htp_server->fs_obj_ids_lock);\n        g_hash_table_remove (htp_server->fs_obj_ids, task->token);\n        pthread_mutex_unlock (&htp_server->fs_obj_ids_lock);\n        goto out;\n    }\n\n    result->done = TRUE;\nout:\n    seaf_repo_unref (repo);\n    free_compute_obj_task(task);\n}\n\nstatic void\nstart_fs_obj_id_cb (evhtp_request_t *req, void *arg)\n{\n    HttpServer *htp_server = seaf->http_server->priv;\n    char **parts;\n    char *repo_id;\n    gboolean dir_only = FALSE;\n    json_t *obj;\n\n    const char *server_head = 
evhtp_kv_find (req->uri->query, \"server-head\");\n    if (server_head == NULL || !is_object_id_valid (server_head)) {\n        char *error = \"Invalid server-head parameter.\\n\";\n        evbuffer_add (req->buffer_out, error, strlen (error));\n        evhtp_send_reply (req, EVHTP_RES_BADREQ);\n        return;\n    }\n\n    const char *client_head = evhtp_kv_find (req->uri->query, \"client-head\");\n    if (client_head && !is_object_id_valid (client_head)) {\n        char *error = \"Invalid client-head parameter.\\n\";\n        evbuffer_add (req->buffer_out, error, strlen (error));\n        evhtp_send_reply (req, EVHTP_RES_BADREQ);\n        return;\n    }\n\n    const char *dir_only_arg = evhtp_kv_find (req->uri->query, \"dir-only\");\n    if (dir_only_arg)\n        dir_only = TRUE;\n\n    parts = g_strsplit (req->uri->path->full + 1, \"/\", 0);\n    repo_id = parts[1];\n\n    int token_status = validate_token (htp_server, req, repo_id, NULL, FALSE);\n    if (token_status != EVHTP_RES_OK) {\n        evhtp_send_reply (req, token_status);\n        goto out;\n    }\n\n    char uuid[37];\n    char *new_token;\n    gen_uuid_inplace (uuid);\n    new_token = g_strndup(uuid, FS_ID_LIST_TOKEN_LEN);\n\n    CalObjResult *result = g_new0(CalObjResult, 1);\n    if (!result) {\n        evhtp_send_reply (req, EVHTP_RES_SERVERR);\n        goto out;\n    }\n    result->done = FALSE;\n\n    ComputeObjTask *task = g_new0 (ComputeObjTask, 1);\n    if (!task) {\n        evhtp_send_reply (req, EVHTP_RES_SERVERR);\n        goto out;\n    }\n\n    task->token = new_token;\n    task->dir_only = dir_only;\n    task->htp_server = htp_server;\n    task->repo_id = g_strdup(repo_id);\n    task->client_head = g_strdup(client_head);\n    task->server_head = g_strdup(server_head);\n\n    pthread_mutex_lock (&htp_server->fs_obj_ids_lock);\n    g_hash_table_insert (htp_server->fs_obj_ids, g_strdup(task->token), result);\n    pthread_mutex_unlock (&htp_server->fs_obj_ids_lock);\n    
g_thread_pool_push (htp_server->compute_fs_obj_id_pool, task, NULL);\n    obj = json_object ();\n    json_object_set_new (obj, \"token\", json_string (new_token));\n\n    char *json_str = json_dumps (obj, JSON_COMPACT);\n    evbuffer_add (req->buffer_out, json_str, strlen(json_str));\n    evhtp_send_reply (req, EVHTP_RES_OK);\n\n    g_free (json_str);\n    json_decref (obj);\nout:\n    g_strfreev (parts);\n}\n\nstatic void\nquery_fs_obj_id_cb (evhtp_request_t *req, void *arg)\n{\n    /* Must be NULL-initialized: the early gotos below reach the\n     * json_decref check at out before obj is ever assigned. */\n    json_t *obj = NULL;\n    const char *token = NULL;\n    CalObjResult *result = NULL;\n    char **parts;\n    char *repo_id = NULL;\n    HttpServer *htp_server = seaf->http_server->priv;\n\n    parts = g_strsplit (req->uri->path->full + 1, \"/\", 0);\n    repo_id = parts[1];\n\n    int token_status = validate_token (htp_server, req, repo_id, NULL, FALSE);\n    if (token_status != EVHTP_RES_OK) {\n        evhtp_send_reply (req, token_status);\n        goto out;\n    }\n\n    token = evhtp_kv_find (req->uri->query, \"token\");\n    if (!token || strlen(token)!=FS_ID_LIST_TOKEN_LEN) {\n        evhtp_send_reply (req, EVHTP_RES_BADREQ);\n        goto out;\n    }\n\n    obj = json_object ();\n\n    pthread_mutex_lock (&htp_server->fs_obj_ids_lock);\n    result = g_hash_table_lookup (htp_server->fs_obj_ids, token);\n    if (!result) {\n        pthread_mutex_unlock (&htp_server->fs_obj_ids_lock);\n        evhtp_send_reply (req, EVHTP_RES_NOTFOUND);\n        goto out;\n    } else {\n        if (!result->done) {\n            json_object_set_new (obj, \"success\", json_false());\n        } else {\n            json_object_set_new (obj, \"success\", json_true());\n        }\n    }\n    pthread_mutex_unlock (&htp_server->fs_obj_ids_lock);\n\n    json_object_set_new (obj, \"token\", json_string (token));\n\n    char *json_str = json_dumps (obj, JSON_COMPACT);\n    evbuffer_add (req->buffer_out, json_str, strlen(json_str));\n    evhtp_send_reply (req, EVHTP_RES_OK);\n\n    g_free (json_str);\n\nout:\n    if 
(obj)\n        json_decref (obj);\n    g_strfreev (parts);\n    return;\n}\n\nstatic void\nretrieve_fs_obj_id_cb (evhtp_request_t *req, void *arg)\n{\n    char **parts;\n    const char *token = NULL;\n    char *repo_id = NULL;\n    GList *list = NULL;\n    CalObjResult *result = NULL;\n    HttpServer *htp_server = seaf->http_server->priv;\n\n    parts = g_strsplit (req->uri->path->full + 1, \"/\", 0);\n    repo_id = parts[1];\n\n    int token_status = validate_token (htp_server, req, repo_id, NULL, FALSE);\n    if (token_status != EVHTP_RES_OK) {\n        evhtp_send_reply (req, token_status);\n        goto out;\n    }\n\n    token = evhtp_kv_find (req->uri->query, \"token\");\n    if (!token || strlen(token)!=FS_ID_LIST_TOKEN_LEN) {\n        evhtp_send_reply (req, EVHTP_RES_BADREQ);\n        goto out;\n    }\n\n    pthread_mutex_lock (&htp_server->fs_obj_ids_lock);\n    result = g_hash_table_lookup (htp_server->fs_obj_ids, token);\n    if (!result) {\n        pthread_mutex_unlock (&htp_server->fs_obj_ids_lock);\n        evhtp_send_reply (req, EVHTP_RES_NOTFOUND);\n        /* goto out (not return): parts must be freed. */\n        goto out;\n    }\n    if (!result->done) {\n        pthread_mutex_unlock (&htp_server->fs_obj_ids_lock);\n\n        char *error = \"The calculation task is not completed.\\n\";\n        evbuffer_add (req->buffer_out, error, strlen(error));\n        evhtp_send_reply (req, EVHTP_RES_BADREQ);\n        /* goto out (not return): parts must be freed. */\n        goto out;\n    }\n    list = result->list;\n    pthread_mutex_unlock (&htp_server->fs_obj_ids_lock);\n\n    GList *ptr;\n    json_t *obj_array = json_array ();\n\n    for (ptr = list; ptr; ptr = ptr->next) {\n        json_array_append_new (obj_array, json_string (ptr->data));\n        g_free (ptr->data);\n    }\n\n    pthread_mutex_lock (&htp_server->fs_obj_ids_lock);\n    g_hash_table_remove (htp_server->fs_obj_ids, token);\n    pthread_mutex_unlock (&htp_server->fs_obj_ids_lock);\n\n    char *obj_list = json_dumps (obj_array, JSON_COMPACT);\n    evbuffer_add (req->buffer_out, obj_list, strlen 
(obj_list));\n    evhtp_send_reply (req, EVHTP_RES_OK);\n\n    g_free (obj_list);\n    json_decref (obj_array);\n\nout:\n    g_strfreev (parts);\n    return;\n}\n\n/* GET handler: stream the content of one block back to the client. */\nstatic void\nget_block_cb (evhtp_request_t *req, void *arg)\n{\n    const char *repo_id = NULL;\n    char *block_id = NULL;\n    char *store_id = NULL;\n    HttpServer *htp_server = seaf->http_server->priv;\n    BlockMetadata *blk_meta = NULL;\n    char *username = NULL;\n\n    char **parts = g_strsplit (req->uri->path->full + 1, \"/\", 0);\n    repo_id = parts[1];\n    block_id = parts[3];\n\n    int token_status = validate_token (htp_server, req, repo_id, &username, FALSE);\n    if (token_status != EVHTP_RES_OK) {\n        evhtp_send_reply (req, token_status);\n        goto out;\n    }\n\n    int perm_status = check_permission (htp_server, repo_id, username,\n                                        \"download\", FALSE);\n    if (perm_status != EVHTP_RES_OK) {\n        evhtp_send_reply (req, EVHTP_RES_FORBIDDEN);\n        goto out;\n    }\n\n    store_id = get_repo_store_id (htp_server, repo_id);\n    if (!store_id) {\n        evhtp_send_reply (req, EVHTP_RES_SERVERR);\n        goto out;\n    }\n\n    blk_meta = seaf_block_manager_stat_block (seaf->block_mgr,\n                                              store_id, 1, block_id);\n    if (blk_meta == NULL || blk_meta->size <= 0) {\n        evhtp_send_reply (req, EVHTP_RES_SERVERR);\n        goto out;\n    }\n\n    BlockHandle *blk_handle = NULL;\n    blk_handle = seaf_block_manager_open_block(seaf->block_mgr,\n                                               store_id, 1, block_id, BLOCK_READ);\n    if (!blk_handle) {\n        seaf_warning (\"Failed to open block %.8s:%s.\\n\", store_id, block_id);\n        evhtp_send_reply (req, EVHTP_RES_SERVERR);\n        goto out;\n    }\n\n    void *block_con = g_new0 (char, blk_meta->size);\n    if (!block_con) {\n        evhtp_send_reply (req, EVHTP_RES_SERVERR);\n        seaf_warning (\"Failed to allocate %d bytes 
memory.\\n\", blk_meta->size);\n        goto free_handle;\n    }\n\n    int rsize = seaf_block_manager_read_block (seaf->block_mgr,\n                                               blk_handle, block_con,\n                                               blk_meta->size);\n    if (rsize != blk_meta->size) {\n        seaf_warning (\"Failed to read block %.8s:%s.\\n\", store_id, block_id);\n        evhtp_send_reply (req, EVHTP_RES_SERVERR);\n    } else {\n        evbuffer_add (req->buffer_out, block_con, blk_meta->size);\n        evhtp_send_reply (req, EVHTP_RES_OK);\n        /* Count download traffic only on success: on a failed read rsize\n         * may be negative and (guint64)rsize would wildly inflate the\n         * statistics. */\n        send_statistic_msg (store_id, username, \"sync-file-download\", (guint64)rsize);\n    }\n    g_free (block_con);\n\nfree_handle:\n    seaf_block_manager_close_block (seaf->block_mgr, blk_handle);\n    seaf_block_manager_block_handle_free (seaf->block_mgr, blk_handle);\n\nout:\n    g_free (username);\n    g_free (blk_meta);\n    g_free (store_id);\n    g_strfreev (parts);\n}\n\nstatic void\nput_send_block_cb (evhtp_request_t *req, void *arg)\n{\n    const char *repo_id = NULL;\n    char *block_id = NULL;\n    char *store_id = NULL;\n    char *username = NULL;\n    HttpServer *htp_server = seaf->http_server->priv;\n    char **parts = NULL;\n    void *blk_con = NULL;\n\n    parts = g_strsplit (req->uri->path->full + 1, \"/\", 0);\n    repo_id = parts[1];\n    block_id = parts[3];\n\n    int token_status = validate_token (htp_server, req, repo_id, &username, FALSE);\n    if (token_status != EVHTP_RES_OK) {\n        evhtp_send_reply (req, token_status);\n        goto out;\n    }\n\n    int perm_status = check_permission (htp_server, repo_id, username,\n                                        \"upload\", FALSE);\n    if (perm_status == EVHTP_RES_FORBIDDEN) {\n        evhtp_send_reply (req, EVHTP_RES_FORBIDDEN);\n        goto out;\n    }\n\n    store_id = get_repo_store_id (htp_server, repo_id);\n    if (!store_id) {\n        evhtp_send_reply (req, EVHTP_RES_SERVERR);\n        goto out;\n    }\n\n    int 
blk_len = evbuffer_get_length (req->buffer_in);\n    if (blk_len == 0) {\n        evhtp_send_reply (req, EVHTP_RES_BADREQ);\n        goto out;\n    }\n\n    blk_con = g_new0 (char, blk_len);\n    if (!blk_con) {\n        evhtp_send_reply (req, EVHTP_RES_SERVERR);\n        seaf_warning (\"Failed to allocate %d bytes memory.\\n\", blk_len);\n        goto out;\n    }\n\n    evbuffer_remove (req->buffer_in, blk_con, blk_len);\n\n    BlockHandle *blk_handle = NULL;\n    blk_handle = seaf_block_manager_open_block (seaf->block_mgr,\n                                                store_id, 1, block_id, BLOCK_WRITE);\n    if (blk_handle == NULL) {\n        seaf_warning (\"Failed to open block %.8s:%s.\\n\", store_id, block_id);\n        evhtp_send_reply (req, EVHTP_RES_SERVERR);\n        goto out;\n    }\n\n    if (seaf_block_manager_write_block (seaf->block_mgr, blk_handle,\n                                        blk_con, blk_len) != blk_len) {\n        seaf_warning (\"Failed to write block %.8s:%s.\\n\", store_id, block_id);\n        evhtp_send_reply (req, EVHTP_RES_SERVERR);\n        seaf_block_manager_close_block (seaf->block_mgr, blk_handle);\n        seaf_block_manager_block_handle_free (seaf->block_mgr, blk_handle);\n        goto out;\n    }\n\n    if (seaf_block_manager_close_block (seaf->block_mgr, blk_handle) < 0) {\n        seaf_warning (\"Failed to close block %.8s:%s.\\n\", store_id, block_id);\n        evhtp_send_reply (req, EVHTP_RES_SERVERR);\n        seaf_block_manager_block_handle_free (seaf->block_mgr, blk_handle);\n        goto out;\n    }\n\n    if (seaf_block_manager_commit_block (seaf->block_mgr,\n                                         blk_handle) < 0) {\n        seaf_warning (\"Failed to commit block %.8s:%s.\\n\", store_id, block_id);\n        evhtp_send_reply (req, EVHTP_RES_SERVERR);\n        seaf_block_manager_block_handle_free (seaf->block_mgr, blk_handle);\n        goto out;\n    }\n\n    seaf_block_manager_block_handle_free 
(seaf->block_mgr, blk_handle);\n\n    evhtp_send_reply (req, EVHTP_RES_OK);\n\n    send_statistic_msg (store_id, username, \"sync-file-upload\", (guint64)blk_len);\n\nout:\n    g_free (username);\n    g_free (store_id);\n    g_strfreev (parts);\n    g_free (blk_con);\n}\n\nstatic void\nblock_oper_cb (evhtp_request_t *req, void *arg)\n{\n    htp_method req_method = evhtp_request_get_method (req);\n\n    if (req_method == htp_method_GET) {\n        get_block_cb (req, arg);\n    } else if (req_method == htp_method_PUT) {\n        put_send_block_cb (req, arg);\n    }\n}\n\n/* Shared handler for fs/block existence checks: reads a JSON array of\n * object ids from the request body and replies with the subset that does\n * NOT yet exist in the store. */\nstatic void\npost_check_exist_cb (evhtp_request_t *req, void *arg, CheckExistType type)\n{\n    HttpServer *htp_server = seaf->http_server->priv;\n    char **parts = g_strsplit (req->uri->path->full + 1, \"/\", 0);\n    char *repo_id = parts[1];\n    char *store_id = NULL;\n    char *username = NULL;\n\n    int token_status = validate_token (htp_server, req, repo_id, &username, FALSE);\n    if (token_status != EVHTP_RES_OK) {\n        evhtp_send_reply (req, token_status);\n        goto out;\n    }\n\n    int perm_status = check_permission (htp_server, repo_id, username,\n                                        \"download\", FALSE);\n    if (perm_status != EVHTP_RES_OK) {\n        evhtp_send_reply (req, EVHTP_RES_FORBIDDEN);\n        goto out;\n    }\n\n    store_id = get_repo_store_id (htp_server, repo_id);\n    if (!store_id) {\n        evhtp_send_reply (req, EVHTP_RES_SERVERR);\n        goto out;\n    }\n\n    size_t list_len = evbuffer_get_length (req->buffer_in);\n    if (list_len == 0) {\n        evhtp_send_reply (req, EVHTP_RES_BADREQ);\n        goto out;\n    }\n\n    char *obj_list_con = g_new0 (char, list_len);\n    if (!obj_list_con) {\n        evhtp_send_reply (req, EVHTP_RES_SERVERR);\n        seaf_warning (\"Failed to allocate %zu bytes memory.\\n\", list_len);\n        goto out;\n    }\n\n    json_error_t jerror;\n    evbuffer_remove (req->buffer_in, obj_list_con, list_len);\n    json_t 
*obj_array = json_loadb (obj_list_con, list_len, 0, &jerror);\n    g_free (obj_list_con);\n\n    if (!obj_array) {\n        seaf_warning (\"dump obj_id to json failed, error: %s\\n\", jerror.text);\n        evhtp_send_reply (req, EVHTP_RES_BADREQ);\n        /* goto out (not return): username, store_id and parts must be freed. */\n        goto out;\n    }\n\n    json_t *obj = NULL;\n    gboolean ret = TRUE;\n    const char *obj_id = NULL;\n    int index = 0;\n\n    int array_size = json_array_size (obj_array);\n    json_t *needed_objs = json_array();\n\n    for (; index < array_size; ++index) {\n        obj = json_array_get (obj_array, index);\n        obj_id = json_string_value (obj);\n        if (!is_object_id_valid (obj_id))\n            continue;\n\n        if (type == CHECK_FS_EXIST) {\n            ret = seaf_fs_manager_object_exists (seaf->fs_mgr, store_id, 1,\n                                                 obj_id);\n        } else if (type == CHECK_BLOCK_EXIST) {\n            ret = seaf_block_manager_block_exists (seaf->block_mgr, store_id, 1,\n                                                   obj_id);\n        }\n\n        if (!ret) {\n            json_array_append (needed_objs, obj);\n        }\n    }\n\n    char *ret_array = json_dumps (needed_objs, JSON_COMPACT);\n    evbuffer_add (req->buffer_out, ret_array, strlen (ret_array));\n    evhtp_send_reply (req, EVHTP_RES_OK);\n\n    g_free (ret_array);\n    json_decref (needed_objs);\n    json_decref (obj_array);\n\nout:\n    g_free (username);\n    g_free (store_id);\n    g_strfreev (parts);\n}\n\nstatic void\npost_check_fs_cb (evhtp_request_t *req, void *arg)\n{\n   post_check_exist_cb (req, arg, CHECK_FS_EXIST);\n}\n\nstatic void\npost_check_block_cb (evhtp_request_t *req, void *arg)\n{\n   post_check_exist_cb (req, arg, CHECK_BLOCK_EXIST);\n}\n\nstatic void\npost_recv_fs_cb (evhtp_request_t *req, void *arg)\n{\n    HttpServer *htp_server = seaf->http_server->priv;\n    char **parts = g_strsplit (req->uri->path->full + 1, \"/\", 0);\n    const char *repo_id = parts[1];\n    char 
*store_id = NULL;\n    char *username = NULL;\n    FsHdr *hdr = NULL;\n\n    int token_status = validate_token (htp_server, req, repo_id, &username, FALSE);\n    if (token_status != EVHTP_RES_OK) {\n        evhtp_send_reply (req, token_status);\n        goto out;\n    }\n\n    int perm_status = check_permission (htp_server, repo_id, username,\n                                        \"upload\", FALSE);\n    if (perm_status != EVHTP_RES_OK) {\n        evhtp_send_reply (req, EVHTP_RES_FORBIDDEN);\n        goto out;\n    }\n\n    store_id = get_repo_store_id (htp_server, repo_id);\n    if (!store_id) {\n        evhtp_send_reply (req, EVHTP_RES_SERVERR);\n        goto out;\n    }\n\n    int fs_con_len = evbuffer_get_length (req->buffer_in);\n    if (fs_con_len < sizeof(FsHdr)) {\n        evhtp_send_reply (req, EVHTP_RES_BADREQ);\n        goto out;\n    }\n\n    hdr = g_new0 (FsHdr, 1);\n    if (!hdr) {\n        evhtp_send_reply (req, EVHTP_RES_SERVERR);\n        goto out;\n    }\n\n    char obj_id[41];\n    void *obj_con = NULL;\n    int con_len;\n\n    while (fs_con_len > 0) {\n        if (fs_con_len < sizeof(FsHdr)) {\n            seaf_warning (\"Bad fs object content format from %.8s:%s.\\n\",\n                          repo_id, username);\n            evhtp_send_reply (req, EVHTP_RES_BADREQ);\n            break;\n        }\n\n        evbuffer_remove (req->buffer_in, hdr, sizeof(FsHdr));\n        con_len = ntohl (hdr->obj_size);\n        memcpy (obj_id, hdr->obj_id, 40);\n        obj_id[40] = 0;\n\n        /* con_len comes from untrusted network input; reject negative or\n         * oversized values before allocating and reading the body. */\n        if (con_len < 0 || (size_t)con_len > fs_con_len - sizeof(FsHdr)) {\n            seaf_warning (\"Bad fs object content format from %.8s:%s.\\n\",\n                          repo_id, username);\n            evhtp_send_reply (req, EVHTP_RES_BADREQ);\n            break;\n        }\n\n        if (!is_object_id_valid (obj_id)) {\n            evhtp_send_reply (req, EVHTP_RES_BADREQ);\n            break;\n        }\n\n        obj_con = g_new0 (char, con_len);\n        if (!obj_con) {\n            evhtp_send_reply (req, EVHTP_RES_SERVERR);\n            break;\n        }\n        evbuffer_remove (req->buffer_in, obj_con, con_len);\n\n        if (seaf_obj_store_write_obj (seaf->fs_mgr->obj_store,\n                                      store_id, 1, obj_id, 
obj_con,\n                                      con_len, FALSE) < 0) {\n            seaf_warning (\"Failed to write fs object %.8s to disk.\\n\",\n                          obj_id);\n            g_free (obj_con);\n            evhtp_send_reply (req, EVHTP_RES_SERVERR);\n            break;\n        }\n\n        fs_con_len -= (con_len + sizeof(FsHdr));\n        g_free (obj_con);\n    }\n\n    if (fs_con_len == 0) {\n        evhtp_send_reply (req, EVHTP_RES_OK);\n    }\n\nout:\n    g_free (store_id);\n    g_free (hdr);\n    g_free (username);\n    g_strfreev (parts);\n}\n\n#define MAX_OBJECT_PACK_SIZE (1 << 20) /* 1MB */\n\nstatic void\npost_pack_fs_cb (evhtp_request_t *req, void *arg)\n{\n    HttpServer *htp_server = seaf->http_server->priv;\n    char **parts = g_strsplit (req->uri->path->full + 1, \"/\", 0);\n    const char *repo_id = parts[1];\n    char *store_id = NULL;\n    char *username = NULL;\n\n    int token_status = validate_token (htp_server, req, repo_id, &username, FALSE);\n    if (token_status != EVHTP_RES_OK) {\n        evhtp_send_reply (req, token_status);\n        goto out;\n    }\n\n    int perm_status = check_permission (htp_server, repo_id, username,\n                                        \"download\", FALSE);\n    if (perm_status != EVHTP_RES_OK) {\n        evhtp_send_reply (req, EVHTP_RES_FORBIDDEN);\n        goto out;\n    }\n    store_id = get_repo_store_id (htp_server, repo_id);\n    if (!store_id) {\n        evhtp_send_reply (req, EVHTP_RES_SERVERR);\n        goto out;\n    }\n\n    int fs_id_list_len = evbuffer_get_length (req->buffer_in);\n    if (fs_id_list_len == 0) {\n        evhtp_send_reply (req, EVHTP_RES_BADREQ);\n        goto out;\n    }\n\n    char *fs_id_list = g_new0 (char, fs_id_list_len);\n    if (!fs_id_list) {\n        evhtp_send_reply (req, EVHTP_RES_SERVERR);\n        seaf_warning (\"Failed to allocate %d bytes memory.\\n\", fs_id_list_len);\n        goto out;\n    }\n\n    json_error_t jerror;\n    evbuffer_remove 
(req->buffer_in, fs_id_list, fs_id_list_len);\n    json_t *fs_id_array = json_loadb (fs_id_list, fs_id_list_len, 0, &jerror);\n\n    g_free (fs_id_list);\n\n    if (!fs_id_array) {\n        seaf_warning (\"dump fs obj_id from json failed, error: %s\\n\", jerror.text);\n        evhtp_send_reply (req, EVHTP_RES_BADREQ);\n        goto out;\n    }\n\n    json_t *obj = NULL;\n    const char *obj_id = NULL;\n    int index = 0;\n    void *fs_data = NULL;\n    int data_len;\n    int data_len_net;\n    int total_size = 0;\n\n    int array_size = json_array_size (fs_id_array);\n\n    for (; index < array_size; ++index) {\n        obj = json_array_get (fs_id_array, index);\n        obj_id = json_string_value (obj);\n\n        if (!is_object_id_valid (obj_id)) {\n            seaf_warning (\"Invalid fs id %s.\\n\", obj_id);\n            evhtp_send_reply (req, EVHTP_RES_BADREQ);\n            json_decref (fs_id_array);\n            goto out;\n        }\n        if (seaf_obj_store_read_obj (seaf->fs_mgr->obj_store, store_id, 1,\n                                     obj_id, &fs_data, &data_len) < 0) {\n            seaf_warning (\"Failed to read seafile object %s:%s.\\n\", store_id, obj_id);\n            evhtp_send_reply (req, EVHTP_RES_SERVERR);\n            json_decref (fs_id_array);\n            goto out;\n        }\n\n        evbuffer_add (req->buffer_out, obj_id, 40);\n        data_len_net = htonl (data_len);\n        evbuffer_add (req->buffer_out, &data_len_net, 4);\n        evbuffer_add (req->buffer_out, fs_data, data_len);\n\n        total_size += data_len;\n        g_free (fs_data);\n\n        if (total_size >= MAX_OBJECT_PACK_SIZE)\n            break;\n    }\n\n    evhtp_send_reply (req, EVHTP_RES_OK);\n\n    json_decref (fs_id_array);\nout:\n    g_free (username);\n    g_free (store_id);\n    g_strfreev (parts);\n}\n\nstatic void\nget_block_map_cb (evhtp_request_t *req, void *arg)\n{\n    const char *repo_id = NULL;\n    char *file_id = NULL;\n    char *store_id = NULL;\n 
   HttpServer *htp_server = seaf->http_server->priv;\n    Seafile *file = NULL;\n    char *block_id;\n    BlockMetadata *blk_meta = NULL;\n    json_t *array = NULL;\n    char *data = NULL;\n    char *username = NULL;\n\n    char **parts = g_strsplit (req->uri->path->full + 1, \"/\", 0);\n    repo_id = parts[1];\n    file_id = parts[3];\n\n    int token_status = validate_token (htp_server, req, repo_id, &username, FALSE);\n    if (token_status != EVHTP_RES_OK) {\n        evhtp_send_reply (req, token_status);\n        goto out;\n    }\n\n    int perm_status = check_permission (htp_server, repo_id, username,\n                                        \"download\", FALSE);\n    if (perm_status != EVHTP_RES_OK) {\n        evhtp_send_reply (req, EVHTP_RES_FORBIDDEN);\n        goto out;\n    }\n\n    store_id = get_repo_store_id (htp_server, repo_id);\n    if (!store_id) {\n        evhtp_send_reply (req, EVHTP_RES_SERVERR);\n        goto out;\n    }\n\n    file = seaf_fs_manager_get_seafile (seaf->fs_mgr, store_id, 1, file_id);\n    if (!file) {\n        evhtp_send_reply (req, EVHTP_RES_NOTFOUND);\n        goto out;\n    }\n\n    array = json_array ();\n\n    int i;\n    for (i = 0; i < file->n_blocks; ++i) {\n        block_id = file->blk_sha1s[i];\n        blk_meta = seaf_block_manager_stat_block (seaf->block_mgr,\n                                                  store_id, 1, block_id);\n        if (blk_meta == NULL) {\n            seaf_warning (\"Failed to find block %s/%s\\n\", store_id, block_id);\n            evhtp_send_reply (req, EVHTP_RES_SERVERR);\n            g_free (blk_meta);\n            goto out;\n        }\n        json_array_append_new (array, json_integer(blk_meta->size));\n        g_free (blk_meta);\n    }\n\n    data = json_dumps (array, JSON_COMPACT);\n    evbuffer_add (req->buffer_out, data, strlen (data));\n    evhtp_send_reply (req, EVHTP_RES_OK);\n\nout:\n    g_free (username);\n    g_free (store_id);\n    seafile_unref (file);\n    if (array)\n     
   json_decref (array);\n    if (data)\n        free (data);\n    g_strfreev (parts);\n}\n\nstatic void\nget_jwt_token_cb (evhtp_request_t *req, void *arg)\n{\n    const char *repo_id = NULL;\n    HttpServer *htp_server = seaf->http_server->priv;\n    json_t *obj = NULL;\n    char *data = NULL;\n    char *username = NULL;\n    char *jwt_token = NULL;\n\n    char **parts = g_strsplit (req->uri->path->full + 1, \"/\", 0);\n    repo_id = parts[1];\n\n    int token_status = validate_token (htp_server, req, repo_id, &username, FALSE);\n    if (token_status != EVHTP_RES_OK) {\n        evhtp_send_reply (req, token_status);\n        goto out;\n    }\n\n    if (!seaf->notif_mgr) {\n        evhtp_send_reply (req, EVHTP_RES_NOTFOUND);\n        goto out;\n    }\n\n    jwt_token = seaf_gen_notif_server_jwt (repo_id, username);\n    if (!jwt_token) {\n        seaf_warning (\"Failed to gen jwt token for repo %s\\n\", repo_id);\n        evhtp_send_reply (req, EVHTP_RES_SERVERR);\n        goto out;\n    }\n\n    obj = json_object ();\n    json_object_set_new (obj, \"jwt_token\", json_string (jwt_token));\n\n    data = json_dumps (obj, JSON_COMPACT);\n    evbuffer_add (req->buffer_out, data, strlen (data));\n    evhtp_send_reply (req, EVHTP_RES_OK);\n\nout:\n    g_free (jwt_token);\n    g_free (username);\n    if (obj)\n        json_decref (obj);\n    if (data)\n        free (data);\n    g_strfreev (parts);\n}\n\nstatic json_t *\nfill_obj_from_seafilerepo (SeafileRepo *srepo, GHashTable *table)\n{\n    int version = 0;\n    char *repo_id = NULL;\n    char *commit_id = NULL;\n    char *repo_name = NULL;\n    char *permission = NULL;\n    char *owner = NULL;\n    char *type = NULL;\n    gint64 last_modify = 0;\n    json_t *obj = NULL;\n\n    g_object_get (srepo, \"version\", &version,\n                         \"id\", &repo_id,\n                         \"head_cmmt_id\", &commit_id,\n                         \"name\", &repo_name,\n                         \"last_modify\", 
&last_modify,\n                         \"permission\", &permission,\n                         \"user\", &owner,\n                         \"repo_type\", &type,\n                         NULL);\n\n    if (!repo_id)\n        goto out;\n    if (type) {\n        g_free (repo_id);\n        goto out;\n    }\n    //the repo_id will be free when the table is destroyed.\n    if (g_hash_table_lookup (table, repo_id)) {\n        g_free (repo_id);\n        goto out;\n    }\n    g_hash_table_insert (table, repo_id, repo_id);\n    obj = json_object ();\n    json_object_set_new (obj, \"version\", json_integer (version));\n    json_object_set_new (obj, \"id\", json_string (repo_id));\n    json_object_set_new (obj, \"head_commit_id\", json_string (commit_id));\n    json_object_set_new (obj, \"name\", json_string (repo_name));\n    json_object_set_new (obj, \"mtime\", json_integer (last_modify));\n    json_object_set_new (obj, \"permission\", json_string (permission));\n    json_object_set_new (obj, \"owner\", json_string (owner));\n\nout:\n    g_free (commit_id);\n    g_free (repo_name);\n    g_free (permission);\n    g_free (owner);\n    g_free (type);\n    return obj;\n}\n\nstatic GHashTable *\nfilter_group_repos (GList *repos)\n{\n    if (!repos)\n        return NULL;\n\n    SeafileRepo *srepo = NULL;\n    SeafileRepo *srepo_tmp = NULL;\n    GList *iter;\n    GHashTable *table = NULL;\n    char *permission = NULL;\n    char *permission_prev = NULL;\n    char *repo_id = NULL;\n    char *type = NULL;\n\n    table = g_hash_table_new_full (g_str_hash, g_str_equal,\n                                   g_free,\n                                   NULL);\n\n    for (iter = repos; iter; iter = iter->next) {\n        srepo = iter->data;\n        g_object_get (srepo, \"id\", &repo_id,\n                             \"permission\", &permission,\n                             \"repo_type\", &type,\n                             NULL);\n        if (type) {\n            g_free (repo_id);\n        
    g_free (permission);\n            g_free (type);\n            g_object_unref (srepo);\n            continue;\n        }\n        srepo_tmp = g_hash_table_lookup (table, repo_id);\n        if (srepo_tmp) {\n            g_object_get (srepo_tmp, \"permission\", &permission_prev,\n                          NULL);\n            if (g_strcmp0 (permission, \"rw\") == 0 && g_strcmp0 (permission_prev, \"r\") == 0) {\n                g_object_unref (srepo_tmp);\n                g_hash_table_remove (table, repo_id);\n                g_hash_table_insert (table, g_strdup (repo_id), srepo);\n            } else {\n                g_object_unref (srepo);\n            }\n            g_free (permission_prev);\n        } else {\n            g_hash_table_insert (table, g_strdup (repo_id), srepo);\n        }\n        g_free (repo_id);\n        g_free (permission);\n        g_free (type);\n    }\n\n    return table;\n}\n\nstatic void\ngroup_repos_to_json (json_t *repo_array, GHashTable *group_repos,\n                     GHashTable *obtained_repos)\n{\n    GHashTableIter iter;\n    gpointer key, value;\n    SeafileRepo *srepo = NULL;\n    json_t *obj;\n\n    g_hash_table_iter_init (&iter, group_repos);\n    while (g_hash_table_iter_next (&iter, &key, &value)) {\n        srepo = value;\n        obj = fill_obj_from_seafilerepo (srepo, obtained_repos);\n        if (!obj) {\n            g_object_unref (srepo);\n            continue;\n        }\n        json_object_set_new (obj, \"type\", json_string (\"grepo\"));\n\n        json_array_append_new (repo_array, obj);\n        g_object_unref (srepo);\n    }\n}\n\nstatic void\nget_accessible_repo_list_cb (evhtp_request_t *req, void *arg)\n{\n    GList *iter;\n    HttpServer *htp_server = seaf->http_server->priv;\n    SeafRepo *repo = NULL;\n    char *user = NULL;\n    GList *repos = NULL;\n    int org_id = -1;\n    const char *repo_id = evhtp_kv_find (req->uri->query, \"repo_id\");\n\n    if (!repo_id || !is_uuid_valid (repo_id)) {\n        
evhtp_send_reply (req, EVHTP_RES_BADREQ);\n        seaf_warning (\"Invalid repo id.\\n\");\n        return;\n    }\n\n    int token_status = validate_token (htp_server, req, repo_id, &user, FALSE);\n    if (token_status != EVHTP_RES_OK) {\n        evhtp_send_reply (req, token_status);\n        return;\n    }\n\n    json_t *obj;\n    json_t *repo_array = json_array ();\n\n    gboolean db_err = FALSE;\n    GHashTable *obtained_repos = NULL;\n    char *repo_id_tmp = NULL;\n    obtained_repos = g_hash_table_new_full (g_str_hash, g_str_equal,\n                                            g_free,\n                                            NULL);\n    //get personal repo list\n    repos = seaf_repo_manager_get_repos_by_owner (seaf->repo_mgr, user, 0, -1, -1, &db_err);\n    if (db_err)\n        goto out;\n\n    for (iter = repos; iter; iter = iter->next) {\n        repo = iter->data;\n        if (repo->type) {\n            seaf_repo_unref (repo);\n            continue;\n        }\n\n        if (!repo->is_corrupted) {\n            if (!g_hash_table_lookup (obtained_repos, repo->id)) {\n                repo_id_tmp = g_strdup (repo->id);\n                g_hash_table_insert (obtained_repos, repo_id_tmp, repo_id_tmp);\n            }\n            obj = json_object ();\n            json_object_set_new (obj, \"version\", json_integer (repo->version));\n            json_object_set_new (obj, \"id\", json_string (repo->id));\n            json_object_set_new (obj, \"head_commit_id\", json_string (repo->head->commit_id));\n            json_object_set_new (obj, \"name\", json_string (repo->name));\n            json_object_set_new (obj, \"mtime\", json_integer (repo->last_modify));\n            json_object_set_new (obj, \"permission\", json_string (\"rw\"));\n            json_object_set_new (obj, \"type\", json_string (\"repo\"));\n            json_object_set_new (obj, \"owner\", json_string (user));\n\n            json_array_append_new (repo_array, obj);\n        }\n        
seaf_repo_unref (repo);\n    }\n    g_list_free (repos);\n\n    GError *error = NULL;\n    SeafileRepo *srepo = NULL;\n    //get shared repo list\n    repos = seaf_share_manager_list_share_repos (seaf->share_mgr, user, \"to_email\", -1, -1, &db_err);\n    if (db_err)\n        goto out;\n\n    for (iter = repos; iter; iter = iter->next) {\n        srepo = iter->data;\n        obj = fill_obj_from_seafilerepo (srepo, obtained_repos);\n        if (!obj) {\n            g_object_unref (srepo);\n            continue;\n        }\n        json_object_set_new (obj, \"type\", json_string (\"srepo\"));\n\n        json_array_append_new (repo_array, obj);\n        g_object_unref (srepo);\n    }\n    g_list_free (repos);\n\n    //get group repo list\n    GHashTable *group_repos = NULL;\n    repos = seaf_get_group_repos_by_user (seaf->repo_mgr, user, org_id, &error);\n    if (error) {\n        g_clear_error (&error);\n        goto out;\n    }\n\n    if (repos) {\n        group_repos = filter_group_repos (repos);\n        group_repos_to_json (repo_array, group_repos, obtained_repos);\n        g_hash_table_destroy (group_repos);\n        g_list_free (repos);\n    }\n\n    //get inner public repo list\n    repos = seaf_repo_manager_list_inner_pub_repos (seaf->repo_mgr, &db_err);\n    if (db_err)\n        goto out;\n\n    for (iter = repos; iter; iter = iter->next) {\n        srepo = iter->data;\n        obj = fill_obj_from_seafilerepo (srepo, obtained_repos);\n        if (!obj) {\n            g_object_unref (srepo);\n            continue;\n        }\n        json_object_set_new (obj, \"type\", json_string (\"grepo\"));\n        json_object_set_new (obj, \"owner\", json_string (\"Organization\"));\n\n        json_array_append_new (repo_array, obj);\n        g_object_unref (srepo);\n    }\n    g_list_free (repos);\n\nout:\n    g_free (user);\n    g_hash_table_destroy (obtained_repos);\n\n    if (db_err) {\n        json_decref (repo_array);\n        seaf_warning (\"DB error when get 
accessible repo list.\\n\");\n        evhtp_send_reply (req, EVHTP_RES_SERVERR);\n        return;\n    }\n\n    char *json_str = json_dumps (repo_array, JSON_COMPACT);\n    evbuffer_add (req->buffer_out, json_str, strlen(json_str));\n    evhtp_send_reply (req, EVHTP_RES_OK);\n\n    g_free (json_str);\n    json_decref (repo_array);\n}\n\nstatic evhtp_res\nhttp_request_finish_cb (evhtp_request_t *req, void *arg)\n{\n    RequestInfo *info = arg;\n    struct timeval end, intv;\n\n    seaf_metric_manager_in_flight_request_dec (seaf->metric_mgr);\n\n    if (!info)\n        return EVHTP_RES_OK;\n\n    g_free (info->url_path);\n    g_free (info);\n    return EVHTP_RES_OK;\n}\n\nstatic evhtp_res\nhttp_request_start_cb (evhtp_request_t *req, evhtp_headers_t *hdr, void *arg)\n{\n    RequestInfo *info = NULL;\n    info = g_new0 (RequestInfo, 1);\n    info->url_path = g_strdup (req->uri->path->full);\n\n    gettimeofday (&info->start, NULL);\n\n    seaf_metric_manager_in_flight_request_inc (seaf->metric_mgr);\n    evhtp_set_hook (&req->hooks, evhtp_hook_on_request_fini, http_request_finish_cb, info);\n    req->cbarg = info;\n\n    return EVHTP_RES_OK;\n}\n\nstatic void\nhttp_request_init (HttpServerStruct *server)\n{\n    HttpServer *priv = server->priv;\n    evhtp_callback_t *cb;\n\n    cb = evhtp_set_cb (priv->evhtp,\n                  GET_PROTO_PATH, get_protocol_cb,\n                  NULL);\n    evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, http_request_start_cb, NULL);\n\n    cb = evhtp_set_regex_cb (priv->evhtp,\n                        GET_CHECK_QUOTA_REGEX, get_check_quota_cb,\n                        NULL);\n    evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, http_request_start_cb, NULL);\n\n    cb = evhtp_set_regex_cb (priv->evhtp,\n                        OP_PERM_CHECK_REGEX, get_check_permission_cb,\n                        NULL);\n    evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, http_request_start_cb, NULL);\n\n    cb = evhtp_set_regex_cb 
(priv->evhtp,\n                        HEAD_COMMIT_OPER_REGEX, head_commit_oper_cb,\n                        NULL);\n    evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, http_request_start_cb, NULL);\n\n    cb = evhtp_set_regex_cb (priv->evhtp,\n                        GET_HEAD_COMMITS_MULTI_REGEX, head_commits_multi_cb,\n                        NULL);\n    evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, http_request_start_cb, NULL);\n\n    cb = evhtp_set_regex_cb (priv->evhtp,\n                        COMMIT_OPER_REGEX, commit_oper_cb,\n                        NULL);\n    evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, http_request_start_cb, NULL);\n\n    cb = evhtp_set_regex_cb (priv->evhtp,\n                        GET_FS_OBJ_ID_REGEX, get_fs_obj_id_cb,\n                        NULL);\n    evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, http_request_start_cb, NULL);\n\n    // evhtp_set_regex_cb (priv->evhtp,\n    //                     START_FS_OBJ_ID_REGEX, start_fs_obj_id_cb,\n    //                     priv);\n\n    // evhtp_set_regex_cb (priv->evhtp,\n    //                     QUERY_FS_OBJ_ID_REGEX, query_fs_obj_id_cb,\n    //                     priv);\n\n    // evhtp_set_regex_cb (priv->evhtp,\n    //                     RETRIEVE_FS_OBJ_ID_REGEX, retrieve_fs_obj_id_cb,\n    //                     priv);\n\n    cb = evhtp_set_regex_cb (priv->evhtp,\n                        BLOCK_OPER_REGEX, block_oper_cb,\n                        NULL);\n    evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, http_request_start_cb, NULL);\n\n    cb = evhtp_set_regex_cb (priv->evhtp,\n                        POST_CHECK_FS_REGEX, post_check_fs_cb,\n                        NULL);\n    evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, http_request_start_cb, NULL);\n\n    cb = evhtp_set_regex_cb (priv->evhtp,\n                        POST_CHECK_BLOCK_REGEX, post_check_block_cb,\n                        NULL);\n    evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, 
http_request_start_cb, NULL);\n\n    cb = evhtp_set_regex_cb (priv->evhtp,\n                        POST_RECV_FS_REGEX, post_recv_fs_cb,\n                        NULL);\n    evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, http_request_start_cb, NULL);\n\n    cb = evhtp_set_regex_cb (priv->evhtp,\n                        POST_PACK_FS_REGEX, post_pack_fs_cb,\n                        NULL);\n    evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, http_request_start_cb, NULL);\n\n    cb = evhtp_set_regex_cb (priv->evhtp,\n                        GET_BLOCK_MAP_REGEX, get_block_map_cb,\n                        NULL);\n    evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, http_request_start_cb, NULL);\n\n    cb = evhtp_set_regex_cb (priv->evhtp,\n                        GET_JWT_TOKEN_REGEX, get_jwt_token_cb,\n                        NULL);\n    evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, http_request_start_cb, NULL);\n\n    cb = evhtp_set_regex_cb (priv->evhtp,\n                        GET_ACCESSIBLE_REPO_LIST_REGEX, get_accessible_repo_list_cb,\n                        NULL);\n    evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, http_request_start_cb, NULL);\n\n    /* Web access file */\n    access_file_init (priv->evhtp);\n\n    /* Web upload file */\n    if (upload_file_init (priv->evhtp, server->http_temp_dir) < 0)\n        exit(-1);\n}\n\nstatic void\ntoken_cache_value_free (gpointer data)\n{\n    TokenInfo *token_info = (TokenInfo *)data;\n    if (token_info != NULL) {\n        g_free (token_info->repo_id);\n        g_free (token_info->email);\n        g_free (token_info);\n    }\n}\n\nstatic gboolean\nis_token_expire (gpointer key, gpointer value, gpointer arg)\n{\n    TokenInfo *token_info = (TokenInfo *)value;\n\n    if(token_info && token_info->expire_time <= (gint64)time(NULL)) {\n        return TRUE;\n    }\n\n    return FALSE;\n}\n\nstatic void\nperm_cache_value_free (gpointer data)\n{\n    PermInfo *perm_info = data;\n    g_free 
(perm_info);\n}\n\nstatic gboolean\nis_perm_expire (gpointer key, gpointer value, gpointer arg)\n{\n    PermInfo *perm_info = (PermInfo *)value;\n\n    if(perm_info && perm_info->expire_time <= (gint64)time(NULL)) {\n        return TRUE;\n    }\n\n    return FALSE;\n}\n\nstatic gboolean\nis_vir_repo_info_expire (gpointer key, gpointer value, gpointer arg)\n{\n    VirRepoInfo *vinfo = (VirRepoInfo *)value;\n\n    if(vinfo && vinfo->expire_time <= (gint64)time(NULL)) {\n        return TRUE;\n    }\n\n    return FALSE;\n}\n\nstatic void\nfree_vir_repo_info (gpointer data)\n{\n    if (!data)\n        return;\n\n    VirRepoInfo *vinfo = data;\n\n    if (vinfo->store_id)\n        g_free (vinfo->store_id);\n\n    g_free (vinfo);\n}\n\nstatic void\nremove_expire_cache_cb (evutil_socket_t sock, short type, void *data)\n{\n    HttpServer *htp_server = data;\n\n    pthread_mutex_lock (&htp_server->token_cache_lock);\n    g_hash_table_foreach_remove (htp_server->token_cache, is_token_expire, NULL);\n    pthread_mutex_unlock (&htp_server->token_cache_lock);\n\n    pthread_mutex_lock (&htp_server->perm_cache_lock);\n    g_hash_table_foreach_remove (htp_server->perm_cache, is_perm_expire, NULL);\n    pthread_mutex_unlock (&htp_server->perm_cache_lock);\n\n    pthread_mutex_lock (&htp_server->vir_repo_info_cache_lock);\n    g_hash_table_foreach_remove (htp_server->vir_repo_info_cache,\n                                 is_vir_repo_info_expire, NULL);\n    pthread_mutex_unlock (&htp_server->vir_repo_info_cache_lock);\n}\n\nstatic void *\nhttp_server_run (void *arg)\n{\n    HttpServerStruct *server = arg;\n    HttpServer *priv = server->priv;\n\n    priv->evbase = event_base_new();\n    priv->evhtp = evhtp_new(priv->evbase, NULL);\n\n    if (evhtp_bind_socket(priv->evhtp,\n                          server->bind_addr,\n                          server->bind_port, 128) < 0) {\n        seaf_warning (\"Could not bind socket: %s\\n\", strerror (errno));\n        exit(-1);\n    }\n\n    
http_request_init (server);\n\n    evhtp_use_threads (priv->evhtp, NULL, server->worker_threads, NULL);\n\n    struct timeval tv;\n    tv.tv_sec = CLEANING_INTERVAL_SEC;\n    tv.tv_usec = 0;\n    priv->reap_timer = event_new (priv->evbase,\n                                  -1,\n                                  EV_PERSIST,\n                                  remove_expire_cache_cb,\n                                  priv);\n    evtimer_add (priv->reap_timer, &tv);\n\n    event_base_loop (priv->evbase, 0);\n\n    return NULL;\n}\n\nHttpServerStruct *\nseaf_http_server_new (struct _SeafileSession *session)\n{\n    HttpServerStruct *server = g_new0 (HttpServerStruct, 1);\n    HttpServer *priv = g_new0 (HttpServer, 1);\n\n    priv->evbase = NULL;\n    priv->evhtp = NULL;\n\n    load_http_config (server, session);\n\n    priv->token_cache = g_hash_table_new_full (g_str_hash, g_str_equal,\n                                               g_free, token_cache_value_free);\n    pthread_mutex_init (&priv->token_cache_lock, NULL);\n\n    priv->perm_cache = g_hash_table_new_full (g_str_hash, g_str_equal,\n                                              g_free, perm_cache_value_free);\n    pthread_mutex_init (&priv->perm_cache_lock, NULL);\n\n    priv->vir_repo_info_cache = g_hash_table_new_full (g_str_hash, g_str_equal,\n                                                       g_free, free_vir_repo_info);\n    pthread_mutex_init (&priv->vir_repo_info_cache_lock, NULL);\n\n    server->http_temp_dir = g_build_filename (session->seaf_dir, \"httptemp\", NULL);\n\n    // priv->compute_fs_obj_id_pool = g_thread_pool_new (compute_fs_obj_id, NULL,\n    //                                                   FS_ID_LIST_MAX_WORKERS, FALSE, NULL);\n\n    // priv->fs_obj_ids = g_hash_table_new_full (g_str_hash, g_str_equal,\n    //                                           g_free, free_obj_cal_result);\n    // pthread_mutex_init (&priv->fs_obj_ids_lock, NULL);\n\n    server->seaf_session = 
session;\n    server->priv = priv;\n\n    return server;\n}\n\ngint64\nget_last_modify_time (const char *path)\n{\n    struct stat st;\n    if (stat (path, &st) < 0) {\n        return -1;\n    }\n\n    return st.st_mtime;\n}\n\nstatic gint64\ncheck_httptemp_dir_recursive (const char *parent_dir, gint64 expired_time)\n{\n    char *full_path;\n    const char *dname;\n    gint64 cur_time;\n    gint64 last_modify = -1;\n    GDir *dir = NULL;\n    gint64 file_num = 0;\n\n    dir = g_dir_open (parent_dir, 0, NULL);\n\n    while ((dname = g_dir_read_name(dir)) != NULL) {\n        full_path = g_build_path (\"/\", parent_dir, dname, NULL);\n\n        if (g_file_test (full_path, G_FILE_TEST_IS_DIR)) {\n            file_num += check_httptemp_dir_recursive (full_path, expired_time);\n        } else {\n            cur_time = time (NULL);\n            last_modify = get_last_modify_time (full_path);\n            if (last_modify == -1) {\n                g_free (full_path);\n                continue;\n            }\n            /*remove blokc cache from local*/\n            if (last_modify + expired_time <= cur_time) {\n                g_unlink (full_path);\n                file_num ++;\n            }\n        }\n        g_free (full_path);\n    }\n\n    g_dir_close (dir);\n\n    return file_num;\n}\n\nstatic int\nscan_httptemp_dir (const char *httptemp_dir, gint64 expired_time)\n{\n    return check_httptemp_dir_recursive (httptemp_dir, expired_time);\n}\n\nstatic void *\ncleanup_expired_httptemp_file (void *arg)\n{\n    GError *error = NULL;\n    HttpServerStruct *server = arg;\n    SeafileSession *session = server->seaf_session;\n    gint64 ttl = 0;\n    gint64 scan_interval = 0;\n    gint64 file_num = 0;\n\n    ttl = fileserver_config_get_int64 (session->config, HTTP_TEMP_FILE_TTL, &error);\n    if (error) {\n        ttl = HTTP_TEMP_FILE_DEFAULT_TTL;\n        g_clear_error (&error);\n    }\n\n    scan_interval = fileserver_config_get_int64 (session->config, HTTP_SCAN_INTERVAL, 
&error);\n    if (error) {\n        scan_interval = HTTP_TEMP_FILE_SCAN_INTERVAL;\n        g_clear_error (&error);\n    }\n\n    while (TRUE) {\n        sleep (scan_interval);\n        file_num = scan_httptemp_dir (server->http_temp_dir, ttl);\n        if (file_num) {\n            seaf_message (\"Clean up %ld http temp files\\n\", file_num);\n            file_num = 0;\n        }\n    }\n\n    return NULL;\n}\n\nint\nseaf_http_server_start (HttpServerStruct *server)\n{\n   int ret = pthread_create (&server->priv->thread_id, NULL, http_server_run, server);\n   if (ret != 0)\n       return -1;\n\n   pthread_detach (server->priv->thread_id);\n\n   pthread_t tid;\n   ret = pthread_create (&tid, NULL, cleanup_expired_httptemp_file, server);\n   if (ret != 0)\n       return -1;\n\n   pthread_detach (tid);\n   return 0;\n}\n\nint\nseaf_http_server_invalidate_tokens (HttpServerStruct *htp_server,\n                                    const GList *tokens)\n{\n    const GList *p;\n\n    pthread_mutex_lock (&htp_server->priv->token_cache_lock);\n    for (p = tokens; p; p = p->next) {\n        const char *token = (char *)p->data;\n        g_hash_table_remove (htp_server->priv->token_cache, token);\n    }\n    pthread_mutex_unlock (&htp_server->priv->token_cache_lock);\n    return 0;\n}\n\n#endif\n"
  },
  {
    "path": "server/http-server.h",
    "content": "#ifndef HTTP_SERVER_H\n#define HTTP_SERVER_H\n\n#ifdef HAVE_EVHTP\n#include <glib.h>\n\n#include \"metric-mgr.h\"\n\nstruct _SeafileSession;\n\nstruct _HttpServer;\n\nstruct _HttpServerStruct {\n    struct _SeafileSession *seaf_session;\n\n    struct _HttpServer *priv;\n\n    char *bind_addr;\n    int bind_port;\n    char *http_temp_dir;        /* temp dir for file upload */\n    char *windows_encoding;\n    int worker_threads;\n    int cluster_shared_temp_file_mode;\n\n    gboolean verify_client_blocks;\n};\n\ntypedef struct RequestInfo {\n    struct timeval start;\n    char *url_path;\n} RequestInfo;\n\ntypedef struct _HttpServerStruct HttpServerStruct;\n\nHttpServerStruct *\nseaf_http_server_new (struct _SeafileSession *session);\n\nint\nseaf_http_server_start (HttpServerStruct *htp_server);\n\nint\nseaf_http_server_invalidate_tokens (HttpServerStruct *htp_server,\n                                    const GList *tokens);\n\nvoid\nsend_statistic_msg (const char *repo_id, char *user, char *operation, guint64 bytes);\n\nchar *\nget_client_ip_addr (void *data);\n\n#endif\n\n#endif\n"
  },
  {
    "path": "server/http-status-codes.h",
    "content": "#ifndef HTTP_STATUS_CODES_H\n#define HTTP_STATUS_CODES_H\n\n/* Seafile specific http status codes. */\n\n#define SEAF_HTTP_RES_FORBIDDEN 403\n#define SEAF_HTTP_RES_BADFILENAME 440\n#define SEAF_HTTP_RES_EXISTS 441\n#define SEAF_HTTP_RES_NOT_EXISTS 441\n#define SEAF_HTTP_RES_TOOLARGE 442\n#define SEAF_HTTP_RES_NOQUOTA 443\n#define SEAF_HTTP_RES_REPO_DELETED 444\n#define SEAF_HTTP_RES_REPO_CORRUPTED 445\n#define SEAF_HTTP_RES_BLOCK_MISSING 446\n\n\n#endif\n"
  },
  {
    "path": "server/http-tx-mgr.c",
    "content": "#include \"common.h\"\n\n#include <pthread.h>\n#include <curl/curl.h>\n#include <jansson.h>\n\n#include <timer.h>\n#include <jwt.h>\n\n#include \"seafile-session.h\"\n#include \"http-tx-mgr.h\"\n\n#include \"utils.h\"\n#include \"seaf-db.h\"\n#include \"seafile-error.h\"\n\n#define DEBUG_FLAG SEAFILE_DEBUG_TRANSFER\n#include \"log.h\"\n\n/* Http connection and connection pool. */\n\nstruct _Connection {\n    CURL *curl;\n    gint64 ctime;               /* Used to clean up unused connection. */\n    gboolean release;           /* If TRUE, the connection will be released. */\n};\n\nstruct _ConnectionPool {\n    GQueue *queue;\n    pthread_mutex_t lock;\n};\n\nstatic Connection *\nconnection_new ()\n{\n    Connection *conn = g_new0 (Connection, 1);\n    if (!conn)\n        return NULL;\n\n    conn->curl = curl_easy_init();\n    conn->ctime = (gint64)time(NULL);\n\n    return conn;\n}\n\nstatic void\nconnection_free (Connection *conn)\n{\n    if (!conn)\n        return;\n\n    curl_easy_cleanup (conn->curl);\n    g_free (conn);\n}\n\nConnectionPool *\nconnection_pool_new ()\n{\n    ConnectionPool *pool = g_new0 (ConnectionPool, 1);\n    if (!pool)\n        return NULL;\n\n    pool->queue = g_queue_new ();\n    pthread_mutex_init (&pool->lock, NULL);\n    return pool;\n}\n\nvoid\nconnection_pool_free (ConnectionPool *pool)\n{\n    if (!pool)\n        return;\n\n    g_queue_free (pool->queue);\n    g_free (pool);\n}\n\nConnection *\nconnection_pool_get_connection (ConnectionPool *pool)\n{\n    Connection *conn = NULL;\n\n    pthread_mutex_lock (&pool->lock);\n    conn = g_queue_pop_head (pool->queue);\n    if (!conn) {\n        conn = connection_new ();\n    }\n    pthread_mutex_unlock (&pool->lock);\n\n    return conn;\n}\n\nvoid\nconnection_pool_return_connection (ConnectionPool *pool, Connection *conn)\n{\n    if (!conn)\n        return;\n\n    if (conn->release) {\n        connection_free (conn);\n        return;\n    }\n\n    curl_easy_reset 
(conn->curl);\n\n    pthread_mutex_lock (&pool->lock);\n    g_queue_push_tail (pool->queue, conn);\n    pthread_mutex_unlock (&pool->lock);\n}\n\nchar*\nhttp_code_to_str (int http_code)\n{\n    switch (http_code) {\n        case HTTP_OK:\n            return \"Successful\";\n        case HTTP_BAD_REQUEST:\n            return \"Bad request\";\n        case HTTP_FORBIDDEN:\n            return \"Permission denied\";\n        case HTTP_NOT_FOUND:\n            return \"Resource not found\";\n    }\n\n    if (http_code >= HTTP_INTERNAL_SERVER_ERROR)\n        return \"Internal server error\";\n\n    return \"Unknown error\";\n}\n\nvoid\nhttp_tx_manager_init ()\n{\n    curl_global_init (CURL_GLOBAL_ALL);\n}\n\ntypedef struct _HttpResponse {\n    char *content;\n    size_t size;\n} HttpResponse;\n\nstatic size_t\nrecv_response (void *contents, size_t size, size_t nmemb, void *userp)\n{\n    size_t realsize = size * nmemb;\n    HttpResponse *rsp = userp;\n\n    rsp->content = g_realloc (rsp->content, rsp->size + realsize);\n    if (!rsp->content) {\n        seaf_warning (\"Not enough memory.\\n\");\n        /* return a value other than realsize to signify an error. */\n        return 0;\n    }\n\n    memcpy (rsp->content + rsp->size, contents, realsize);\n    rsp->size += realsize;\n\n    return realsize;\n}\n\n#define HTTP_TIMEOUT_SEC 45\n\n/*\n * The @timeout parameter is for detecting network connection problems. \n * The @timeout parameter should be set to TRUE for data-transfer-only operations,\n * such as getting objects, blocks. For operations that requires calculations\n * on the server side, the timeout should be set to FALSE. 
Otherwise when\n * the server sometimes takes more than 45 seconds to calculate the result,\n * the client will time out.\n */\nstatic int\nhttp_get_common (CURL *curl, const char *url,\n                 struct curl_slist **headers,\n                 const char *token,\n                 int *rsp_status, char **rsp_content, gint64 *rsp_size,\n                 HttpRecvCallback callback, void *cb_data,\n                 gboolean timeout)\n{\n    int ret = 0;\n\n    if (token) {\n        char *token_header = g_strdup_printf (\"Authorization: Token %s\", token);\n        *headers = curl_slist_append (*headers, token_header);\n        g_free (token_header);\n    }\n    *headers = curl_slist_append (*headers, \"User-Agent: Seafile Server\");\n    *headers = curl_slist_append (*headers, \"Content-Type: application/json\");\n    curl_easy_setopt(curl, CURLOPT_HTTPHEADER, *headers);\n\n    curl_easy_setopt(curl, CURLOPT_URL, url);\n    curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1L);\n\n    if (timeout) {\n        /* Set low speed limit to 1 bytes. This effectively means no data. 
*/\n        curl_easy_setopt(curl, CURLOPT_LOW_SPEED_LIMIT, 1);\n        curl_easy_setopt(curl, CURLOPT_LOW_SPEED_TIME, HTTP_TIMEOUT_SEC);\n    }\n\n    /*if (seaf->disable_verify_certificate) {\n        curl_easy_setopt (curl, CURLOPT_SSL_VERIFYPEER, 0L);\n        curl_easy_setopt (curl, CURLOPT_SSL_VERIFYHOST, 0L);\n    }*/\n\n    HttpResponse rsp;\n    memset (&rsp, 0, sizeof(rsp));\n    if (rsp_content) {\n        curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, recv_response);\n        curl_easy_setopt(curl, CURLOPT_WRITEDATA, &rsp);\n    } else if (callback) {\n        curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, callback);\n        curl_easy_setopt(curl, CURLOPT_WRITEDATA, cb_data);\n    }\n\n    /*gboolean is_https = (strncasecmp(url, \"https\", strlen(\"https\")) == 0);\n    set_proxy (curl, is_https);*/\n\n    curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);\n\n    int rc = curl_easy_perform (curl);\n    if (rc != 0) {\n        seaf_warning (\"libcurl failed to GET %s: %s.\\n\",\n                      url, curl_easy_strerror(rc));\n        ret = -1;\n        goto out;\n    }\n\n    long status;\n    rc = curl_easy_getinfo (curl, CURLINFO_RESPONSE_CODE, &status);\n    if (rc != CURLE_OK) {\n        seaf_warning (\"Failed to get status code for GET %s.\\n\", url);\n        ret = -1;\n        goto out;\n    }\n\n    *rsp_status = status;\n\n    if (rsp_content) {\n        *rsp_content = rsp.content;\n        *rsp_size = rsp.size;\n    }\n\nout:\n    if (ret < 0) {\n        g_free (rsp.content);\n    }\n    return ret;\n}\n\ntypedef struct _HttpRequest {\n    const char *content;\n    size_t size;\n} HttpRequest;\n\nstatic size_t\nsend_request (void *ptr, size_t size, size_t nmemb, void *userp)\n{\n    size_t realsize = size *nmemb;\n    size_t copy_size;\n    HttpRequest *req = userp;\n\n    if (req->size == 0)\n        return 0;\n\n    copy_size = MIN(req->size, realsize);\n    memcpy (ptr, req->content, copy_size);\n    req->size -= copy_size;\n    
req->content = req->content + copy_size;\n\n    return copy_size;\n}\n\nstatic int\nhttp_post_common (CURL *curl, const char *url, \n                  struct curl_slist **headers,\n                  const char *token,\n                  const char *req_content, gint64 req_size,\n                  int *rsp_status, char **rsp_content, gint64 *rsp_size,\n                  gboolean timeout, int timeout_sec)\n{\n    int ret = 0;\n\n    if (token) {\n        char *token_header = g_strdup_printf (\"Authorization: Token %s\", token);\n        *headers = curl_slist_append (*headers, token_header);\n        g_free (token_header);\n    }\n    *headers = curl_slist_append (*headers, \"User-Agent: Seafile Server\");\n    *headers = curl_slist_append (*headers, \"Content-Type: application/json\");\n    curl_easy_setopt(curl, CURLOPT_HTTPHEADER, *headers);\n\n    curl_easy_setopt(curl, CURLOPT_URL, url);\n    curl_easy_setopt(curl, CURLOPT_POST, 1L);\n\n    if (timeout) {\n        /* Set low speed limit to 1 bytes. This effectively means no data. 
*/\n        curl_easy_setopt(curl, CURLOPT_LOW_SPEED_LIMIT, 1);\n        curl_easy_setopt(curl, CURLOPT_LOW_SPEED_TIME, timeout_sec);\n    }\n\n    /*if (seaf->disable_verify_certificate) {\n        curl_easy_setopt (curl, CURLOPT_SSL_VERIFYPEER, 0L);\n        curl_easy_setopt (curl, CURLOPT_SSL_VERIFYHOST, 0L);\n    }*/\n\n    HttpRequest req;\n    if (req_content) {\n        memset (&req, 0, sizeof(req));\n        req.content = req_content;\n        req.size = req_size;\n        curl_easy_setopt(curl, CURLOPT_READFUNCTION, send_request);\n        curl_easy_setopt(curl, CURLOPT_READDATA, &req);\n    }\n    curl_easy_setopt(curl, CURLOPT_POSTFIELDSIZE_LARGE, (curl_off_t)req_size);\n\n    curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1L);\n\n    HttpResponse rsp;\n    memset (&rsp, 0, sizeof(rsp));\n    if (rsp_content) {\n        curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, recv_response);\n        curl_easy_setopt(curl, CURLOPT_WRITEDATA, &rsp);\n    }\n\n    /*gboolean is_https = (strncasecmp(url, \"https\", strlen(\"https\")) == 0);\n    set_proxy (curl, is_https);*/\n\n    curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);\n    /* All POST requests should remain POST after redirect. 
*/\n    curl_easy_setopt(curl, CURLOPT_POSTREDIR, CURL_REDIR_POST_ALL);\n\n    int rc = curl_easy_perform (curl);\n    if (rc != 0) {\n        seaf_warning (\"libcurl failed to POST %s: %s.\\n\",\n                      url, curl_easy_strerror(rc));\n        ret = -1;\n        goto out;\n    }\n\n    long status;\n    rc = curl_easy_getinfo (curl, CURLINFO_RESPONSE_CODE, &status);\n    if (rc != CURLE_OK) {\n        seaf_warning (\"Failed to get status code for POST %s.\\n\", url);\n        ret = -1;\n        goto out;\n    }\n\n    *rsp_status = status;\n\n    if (rsp_content) {\n        *rsp_content = rsp.content;\n        *rsp_size = rsp.size;\n    }\n\nout:\n    if (ret < 0) {\n        g_free (rsp.content);\n    }\n    return ret;\n}\n\nint\nhttp_post (Connection *conn, const char *url, const char *token,\n           const char *req_content, gint64 req_size,\n           int *rsp_status, char **rsp_content, gint64 *rsp_size,\n           gboolean timeout, int timeout_sec)\n{\n    struct curl_slist *headers = NULL;\n    int ret = 0;\n    CURL *curl;\n\n    curl = conn->curl;\n\n    g_return_val_if_fail (req_content != NULL, -1);\n\n    ret = http_post_common (curl, url, &headers, token, req_content, req_size,\n                            rsp_status, rsp_content, rsp_size, timeout, timeout_sec);\n    if (ret < 0) {\n        conn->release = TRUE;\n    }\n    curl_slist_free_all (headers);\n    return ret;\n}\n\nstatic char *\nparse_nickname (const char *rsp_content, int rsp_size)\n{\n    json_t *array = NULL, *object, *member;\n    json_error_t jerror;\n    size_t n;\n    int i;\n    char *nickname = NULL;\n\n    object = json_loadb (rsp_content, rsp_size, 0, &jerror);\n    if (!object) {\n        seaf_warning (\"Parse response failed: %s.\\n\", jerror.text);\n        return NULL;\n    }\n\n    array = json_object_get (object, \"user_list\");\n    if (!array) {\n        goto out;\n    }\n\n    n = json_array_size (array);\n    for (i = 0; i < n; ++i) {\n        
json_t *obj = json_array_get (array, i);\n\n        member = json_object_get (obj, \"name\");\n        if (!member) {\n            continue;\n        }\n        nickname = g_strdup (json_string_value(member));\n        break;\n    }\nout:\n    json_decref (object);\n    return nickname;\n}\n\nstatic char *\ngen_jwt_token ()\n{\n    char *jwt_token = NULL;\n    gint64 now = (gint64)time(NULL);\n\n    jwt_t *jwt = NULL;\n\n    if (!seaf->seahub_pk) {\n        return NULL;\n    }\n\n    int ret = jwt_new (&jwt);\n    if (ret != 0 || jwt == NULL) {\n        seaf_warning (\"Failed to create jwt\\n\");\n        goto out;\n    }\n\n    ret = jwt_add_grant_bool (jwt, \"is_internal\", TRUE);\n    if (ret != 0) {\n        seaf_warning (\"Failed to add is_internal to jwt\\n\");\n        goto out;\n    }\n\n    ret = jwt_add_grant_int (jwt, \"exp\", now + 300);\n    if (ret != 0) {\n        seaf_warning (\"Failed to add expire time to jwt\\n\");\n        goto out;\n    }\n    ret = jwt_set_alg (jwt, JWT_ALG_HS256, (unsigned char *)seaf->seahub_pk, strlen(seaf->seahub_pk));\n    if (ret != 0) {\n        seaf_warning (\"Failed to set alg\\n\");\n        goto out;\n    }\n\n    jwt_token = jwt_encode_str (jwt);\n\nout:\n    jwt_free (jwt);\n    return jwt_token;\n}\n\nchar *\nhttp_tx_manager_get_nickname (const char *modifier)\n{\n    Connection *conn = NULL;\n    struct curl_slist *headers = NULL;\n    int ret = 0;\n    CURL *curl;\n    json_t *content = NULL;\n    json_t *array = NULL;\n    int rsp_status;\n    char *req_content = NULL;\n    char *jwt_token = NULL;\n    char *rsp_content = NULL;\n    char *nickname = NULL;\n    gint64 rsp_size;\n    char *url = NULL;\n\n    jwt_token = gen_jwt_token ();\n    if (!jwt_token) {\n        return NULL;\n    }\n\n    conn = connection_pool_get_connection (seaf->seahub_conn_pool);\n    if (!conn) {\n        g_free (jwt_token);\n        seaf_warning (\"Failed to get connection: out of memory.\\n\");\n        return NULL;\n    }\n\n    
content = json_object ();\n    array = json_array ();\n    json_array_append_new (array, json_string (modifier));\n    json_object_set_new (content, \"user_id_list\", array);\n    req_content  = json_dumps (content, JSON_COMPACT);\n    if (!req_content) {\n        json_decref (content);\n        seaf_warning (\"Failed to dump json request.\\n\");\n        goto out;\n    }\n    json_decref (content);\n\n    curl = conn->curl;\n\n    url = g_strdup_printf(\"%s/user-list/\", seaf->seahub_url);\n    ret = http_post_common (curl, url, &headers, jwt_token, req_content, strlen(req_content),\n                            &rsp_status, &rsp_content, &rsp_size, TRUE, 45);\n    if (ret < 0) {\n        conn->release = TRUE;\n        goto out;\n    }\n\n    if (rsp_status != HTTP_OK) {\n        goto out;\n    }\n\n    nickname = parse_nickname (rsp_content, rsp_size);\n\nout:\n    g_free (url);\n    g_free (jwt_token);\n    g_free (req_content);\n    g_free (rsp_content);\n    curl_slist_free_all (headers);\n    connection_pool_return_connection (seaf->seahub_conn_pool, conn);\n\n    return nickname;\n}\n\nstatic SeafileShareLinkInfo *\nparse_share_link_info (const char *rsp_content, int rsp_size)\n{\n    json_t *object;\n    json_error_t jerror;\n    size_t n;\n    int i;\n    const char *repo_id = NULL;\n    const char *file_path = NULL;\n    const char *parent_dir = NULL;\n    const char *share_type = NULL;\n    SeafileShareLinkInfo *info = NULL;\n\n    object = json_loadb (rsp_content, rsp_size, 0, &jerror);\n    if (!object) {\n        seaf_warning (\"Parse response failed: %s.\\n\", jerror.text);\n        return NULL;\n    }\n\n    repo_id = json_object_get_string_member (object, \"repo_id\");\n    if (!repo_id) {\n        seaf_warning (\"Failed to find repo_id in json.\\n\");\n        goto out;\n    }\n    file_path = json_object_get_string_member (object, \"file_path\");\n    parent_dir = json_object_get_string_member (object, \"parent_dir\");\n    share_type = 
json_object_get_string_member (object, \"share_type\");\n\n    info = g_object_new (SEAFILE_TYPE_SHARE_LINK_INFO,\n                         \"repo_id\", repo_id,\n                         \"file_path\", file_path,\n                         \"parent_dir\", parent_dir,\n                         \"share_type\", share_type,\n                         NULL);\n\nout:\n    json_decref (object);\n    return info;\n}\n\nchar *\nparse_error_message (const char *rsp_content, int rsp_size)\n{\n    json_t *object;\n    json_error_t jerror;\n    const char *err_msg = NULL;\n    char *ret = NULL;\n\n    if (!rsp_content) {\n        return NULL;\n    }\n\n    object = json_loadb (rsp_content, rsp_size, 0, &jerror);\n    if (!object) {\n        ret = g_strdup (rsp_content);\n        return ret;\n    }\n\n    err_msg = json_object_get_string_member (object, \"error_msg\");\n    if (!err_msg) {\n        ret = g_strdup (rsp_content);\n        goto out;\n    }\n    ret = g_strdup (err_msg);\n\nout:\n    json_decref (object);\n\n    return ret;\n}\n\nSeafileShareLinkInfo *\nhttp_tx_manager_query_share_link_info (const char *token, const char *cookie, const char *type,\n                                       const char *ip_addr, const char *user_agent, int *status, char **err_msg)\n{\n    Connection *conn = NULL;\n    char *cookie_header;\n    struct curl_slist *headers = NULL;\n    int ret = 0;\n    CURL *curl;\n    json_t *content = NULL;\n    char *req_content = NULL;\n    int rsp_status;\n    char *jwt_token = NULL;\n    char *rsp_content = NULL;\n    gint64 rsp_size;\n    SeafileShareLinkInfo *info = NULL;\n    char *url = NULL;\n\n    jwt_token = gen_jwt_token ();\n    if (!jwt_token) {\n        return NULL;\n    }\n\n    conn = connection_pool_get_connection (seaf->seahub_conn_pool);\n    if (!conn) {\n        g_free (jwt_token);\n        seaf_warning (\"Failed to get connection: out of memory.\\n\");\n        return NULL;\n    }\n\n    content = json_object ();\n    
json_object_set_new (content, \"token\", json_string(token));\n    if (ip_addr)\n        json_object_set_new (content, \"ip_addr\", json_string(ip_addr));\n    if (user_agent)\n        json_object_set_new (content, \"user_agent\", json_string(user_agent));\n    req_content  = json_dumps (content, JSON_COMPACT);\n    if (!req_content) {\n        seaf_warning (\"Failed to dump json request.\\n\");\n        goto out;\n    }\n\n    curl = conn->curl;\n    if (cookie) {\n        cookie_header = g_strdup_printf (\"Cookie: %s\", cookie);\n        headers = curl_slist_append (headers, cookie_header);\n        g_free (cookie_header);\n    }\n\n    url = g_strdup_printf(\"%s/check-share-link-access/?type=%s\", seaf->seahub_url, type);\n    ret = http_post_common (curl, url, &headers, jwt_token, req_content, strlen(req_content),\n                            &rsp_status, &rsp_content, &rsp_size, TRUE, 45);\n    if (ret < 0) {\n        conn->release = TRUE;\n        goto out;\n    }\n\n    *status = rsp_status;\n    if (rsp_status != HTTP_OK) {\n        *err_msg = parse_error_message (rsp_content, rsp_size);\n        goto out;\n    }\n\n    info = parse_share_link_info (rsp_content, rsp_size);\n\nout:\n    if (content)\n        json_decref (content);\n    g_free (url);\n    g_free (jwt_token);\n    g_free (req_content);\n    g_free (rsp_content);\n    curl_slist_free_all (headers);\n    connection_pool_return_connection (seaf->seahub_conn_pool, conn);\n\n    return info;\n}\n\nchar *\nparse_file_access_info (const char *rsp_content, int rsp_size)\n{\n    json_t *object;\n    json_error_t jerror;\n    const char *user = NULL;\n    char *ret = NULL;\n\n    object = json_loadb (rsp_content, rsp_size, 0, &jerror);\n    if (!object) {\n        seaf_warning (\"Failed to parse response when check file access in Seahub: %s.\\n\", jerror.text);\n        return NULL;\n    }\n\n    user = json_object_get_string_member (object, \"user\");\n    if (!user) {\n        seaf_warning (\"Failed 
to find user in json when check file access in Seahub.\\n\");\n        goto out;\n    }\n    ret = g_strdup (user);\n\nout:\n    json_decref (object);\n\n    return ret;\n}\n\nint\nhttp_tx_manager_check_file_access (const char *repo_id, const char *token, const char *cookie,\n                                   const char *path, const char *op, const char *ip_addr,\n                                   const char *user_agent, char **user,\n                                   int *status, char **err_msg)\n{\n    Connection *conn = NULL;\n    char *cookie_header;\n    struct curl_slist *headers = NULL;\n    int ret = -1;\n    CURL *curl;\n    json_t *content = NULL;\n    int rsp_status;\n    char *req_content = NULL;\n    char *jwt_token = NULL;\n    char *rsp_content = NULL;\n    gint64 rsp_size;\n    char *url = NULL;\n\n    jwt_token = gen_jwt_token ();\n    if (!jwt_token) {\n        return -1;\n    }\n\n    conn = connection_pool_get_connection (seaf->seahub_conn_pool);\n    if (!conn) {\n        g_free (jwt_token);\n        seaf_warning (\"Failed to get connection: out of memory.\\n\");\n        return -1;\n    }\n\n    content = json_object ();\n    json_object_set_new (content, \"op\", json_string(op));\n    if (token) {\n        json_object_set_new (content, \"token\", json_string(token));\n    }\n    json_object_set_new (content, \"path\", json_string(path));\n    if (ip_addr)\n        json_object_set_new (content, \"ip_addr\", json_string(ip_addr));\n    if (user_agent)\n        json_object_set_new (content, \"user_agent\", json_string(user_agent));\n    req_content  = json_dumps (content, JSON_COMPACT);\n    if (!req_content) {\n        ret = -1;\n        seaf_warning (\"Failed to dump json request.\\n\");\n        goto out;\n    }\n\n    curl = conn->curl;\n    if (cookie) {\n        cookie_header = g_strdup_printf (\"Cookie: %s\", cookie);\n        headers = curl_slist_append (headers, cookie_header);\n        g_free (cookie_header);\n    }\n\n    url = 
g_strdup_printf(\"%s/repos/%s/check-access/\", seaf->seahub_url, repo_id);\n    ret = http_post_common (curl, url, &headers, jwt_token, req_content, strlen(req_content),\n                            &rsp_status, &rsp_content, &rsp_size, TRUE, 45);\n    if (ret < 0) {\n        conn->release = TRUE;\n        goto out;\n    }\n\n    *status = rsp_status;\n    if (rsp_status != HTTP_OK) {\n        *err_msg = parse_error_message (rsp_content, rsp_size);\n        ret = -1;\n        goto out;\n    }\n\n    *user = parse_file_access_info (rsp_content, rsp_size);\n    if (*user == NULL) {\n        ret = -1;\n        goto out;\n    }\n\nout:\n    if (content)\n        json_decref (content);\n    g_free (url);\n    g_free (jwt_token);\n    g_free (req_content);\n    g_free (rsp_content);\n    curl_slist_free_all (headers);\n    connection_pool_return_connection (seaf->seahub_conn_pool, conn);\n\n    return ret;\n}\n"
  },
  {
    "path": "server/http-tx-mgr.h",
    "content": "#ifndef HTTP_TX_MGR_H\n#define HTTP_TX_MGR_H\n\n#include <curl/curl.h>\n\n#define HTTP_OK 200\n#define HTTP_BAD_REQUEST 400\n#define HTTP_FORBIDDEN 403\n#define HTTP_NOT_FOUND 404\n#define HTTP_NO_QUOTA 443\n#define HTTP_REPO_DELETED 444\n#define HTTP_INTERNAL_SERVER_ERROR 500\n\ntypedef struct _Connection Connection;\ntypedef struct _ConnectionPool ConnectionPool;\n\nConnectionPool *\nconnection_pool_new ();\n\nConnection *\nconnection_pool_get_connection (ConnectionPool *pool);\n\nvoid\nconnection_pool_return_connection (ConnectionPool *pool, Connection *conn);\n\nvoid\nconnection_pool_free (ConnectionPool *pool);\n\nchar*\nhttp_code_to_str (int http_code);\n\ntypedef size_t (*HttpRecvCallback) (void *, size_t, size_t, void *);\n\nint\nhttp_get (Connection *conn, const char *url, const char *token,\n          int *rsp_status, char **rsp_content, gint64 *rsp_size,\n          HttpRecvCallback callback, void *cb_data,\n          gboolean timeout);\n\nint\nhttp_post (Connection *conn, const char *url, const char *token,\n           const char *req_content, gint64 req_size,\n           int *rsp_status, char **rsp_content, gint64 *rsp_size,\n           gboolean timeout, int timeout_sec);\n\nvoid\nhttp_tx_manager_init ();\n\nchar *\nhttp_tx_manager_get_nickname (const char *modifier);\n\nSeafileShareLinkInfo *\nhttp_tx_manager_query_share_link_info (const char *token, const char *cookie, const char *type,\n                                       const char *ip_addr, const char *user_agent,\n                                       int *status, char **err_msg);\n\nint\nhttp_tx_manager_check_file_access (const char *repo_id, const char *token, const char *cookie,\n                                   const char *path, const char *op, const char *ip_addr,\n                                   const char *user_agent, char **user,\n                                   int *status, char **err_msg);\n#endif\n"
  },
  {
    "path": "server/index-blocks-mgr.c",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#include \"common.h\"\n\n#include <glib/gstdio.h>\n\n#include <jansson.h>\n#include <openssl/sha.h>\n\n#include <timer.h>\n\n#include \"utils.h\"\n#include \"log.h\"\n\n#include \"seafile-session.h\"\n#include \"repo-mgr.h\"\n#include \"fs-mgr.h\"\n#include \"seafile-error.h\"\n#include \"seafile-crypt.h\"\n#include \"index-blocks-mgr.h\"\n\n#define TOKEN_LEN 36\n#define PROGRESS_TTL 5 * 3600 // 5 hours\n#define SCAN_PROGRESS_INTERVAL 24 * 3600 // 1 day\n\nstatic void\nstart_index_task (gpointer data, gpointer user_data);\n\nstatic char *\ngen_new_token (GHashTable *token_hash);\n\nstatic int\nscan_progress (void *data);\n\nstruct SeafileCrypt;\n\ntypedef struct IndexBlksMgrPriv {\n    pthread_mutex_t progress_lock;\n    GHashTable *progress_store;\n    GThreadPool *idx_tpool;\n    // This timer is used to scan progress and remove invalid progress.\n    CcnetTimer *scan_progress_timer;\n} IndexBlksMgrPriv;\n\ntypedef struct IndexPara {\n    GList *filenames;\n    GList *paths;\n    SeafRepo *repo;\n    char *user;\n    char *canon_path;\n    int replace_existed;\n    SeafileCrypt *crypt;\n    gboolean ret_json;\n    IdxProgress *progress;\n} IndexPara;\n\nstatic void\nfree_progress (IdxProgress *progress)\n{\n    if (!progress)\n        return;\n\n    g_free (progress->ret_json);\n    g_free (progress);\n}\n\n\nIndexBlksMgr *\nindex_blocks_mgr_new (SeafileSession *session)\n{\n    GError *error = NULL;\n    IndexBlksMgr *mgr = g_new0 (IndexBlksMgr, 1);\n    IndexBlksMgrPriv *priv = g_new0 (IndexBlksMgrPriv, 1);\n\n    priv->idx_tpool = g_thread_pool_new (start_index_task,\n                                         priv,\n                                         session->max_index_processing_threads,\n                                         FALSE, &error);\n    if (!priv->idx_tpool) {\n        if (error) {\n            seaf_warning (\"Failed to create index task thread 
pool: %s.\\n\", error->message);\n            g_clear_error (&error);\n        } else {\n            seaf_warning (\"Failed to create index task thread pool.\\n\");\n        }\n        g_free (priv);\n        g_free (mgr);\n        return NULL;\n    }\n\n    pthread_mutex_init (&priv->progress_lock, NULL);\n    priv->progress_store = g_hash_table_new_full (g_str_hash, g_str_equal, g_free,\n                                                  (GDestroyNotify)free_progress);\n    priv->scan_progress_timer = ccnet_timer_new (scan_progress, priv,\n                                                 SCAN_PROGRESS_INTERVAL * 1000);\n    mgr->priv = priv;\n\n    return mgr;\n}\n\nstatic int\nscan_progress (void *data)\n{\n    time_t now = time(NULL);\n    IndexBlksMgrPriv *priv = data;\n    GHashTableIter iter;\n    gpointer key, value;\n    IdxProgress *progress;\n\n    pthread_mutex_lock (&priv->progress_lock);\n\n    g_hash_table_iter_init (&iter, priv->progress_store);\n    while (g_hash_table_iter_next (&iter, &key, &value)) {\n        progress = value;\n        if (now >= progress->expire_ts && progress->status != 1) {\n            g_hash_table_iter_remove (&iter);\n        }\n    }\n\n    pthread_mutex_unlock (&priv->progress_lock);\n\n    return TRUE;\n}\n\nstatic void\nfree_index_para (IndexPara *idx_para)\n{\n    if (!idx_para)\n        return;\n\n    string_list_free (idx_para->filenames);\n    string_list_free (idx_para->paths);\n    seaf_repo_unref (idx_para->repo);\n    g_free (idx_para->user);\n    g_free (idx_para->canon_path);\n    g_free (idx_para->crypt);\n    g_free (idx_para);\n}\n\nstatic void\nstart_index_task (gpointer data, gpointer user_data)\n{\n    IndexPara *idx_para = data;\n    SeafRepo *repo = idx_para->repo;\n    GList *ptr = NULL, *id_list = NULL, *size_list = NULL;\n    char *path = NULL;\n    char *ret_json = NULL;\n    char *gc_id = NULL;\n    char hex[41];\n    unsigned char sha1[20];\n    int ret = 0;\n    IdxProgress *progress = 
idx_para->progress;\n    SeafileCrypt *crypt = idx_para->crypt;\n\n    gc_id = seaf_repo_get_current_gc_id(repo);\n    gint64 *size;\n    for (ptr = idx_para->paths; ptr; ptr = ptr->next) {\n        path = ptr->data;\n\n        size = g_new (gint64, 1);\n        if (seaf_fs_manager_index_blocks (seaf->fs_mgr,\n                    repo->store_id, repo->version,\n                    path, sha1, size, crypt, TRUE, FALSE, &(progress->indexed)) < 0) {\n            seaf_warning (\"failed to index blocks\");\n            progress->status = -1;\n            goto out;\n        }\n\n        rawdata_to_hex(sha1, hex, 20);\n        id_list = g_list_prepend (id_list, g_strdup(hex));\n        size_list = g_list_prepend (size_list, size);\n    }\n    id_list = g_list_reverse (id_list);\n    size_list = g_list_reverse (size_list);\n    ret = post_files_and_gen_commit (idx_para->filenames,\n                                     idx_para->repo->id,\n                                     idx_para->user,\n                                     idx_para->ret_json ? 
&ret_json : NULL,\n                                     idx_para->replace_existed,\n                                     idx_para->canon_path,\n                                     id_list,\n                                     size_list,\n                                     0,\n                                     gc_id,\n                                     NULL);\n    progress->status = ret;\n    if (idx_para->ret_json) {\n        progress->ret_json = g_strdup(ret_json);\n        g_free (ret_json);\n    }\n\nout:\n    /* remove temp files */\n    for (ptr = idx_para->paths; ptr; ptr = ptr->next)\n        g_unlink (ptr->data);\n\n    g_list_free_full (id_list, g_free);\n    g_list_free_full (size_list, g_free);\n    free_index_para (idx_para);\n    g_free (gc_id);\n    return;\n}\n\nchar *\nindex_blocks_mgr_query_progress (IndexBlksMgr *mgr,\n                                 const char *token,\n                                 GError **error)\n{\n    char *ret_info;\n    json_t *obj;\n    IdxProgress *progress;\n    IndexBlksMgrPriv *priv = mgr->priv;\n\n    pthread_mutex_lock (&priv->progress_lock);\n    progress = g_hash_table_lookup (priv->progress_store, token);\n    pthread_mutex_unlock (&priv->progress_lock);\n\n    if (!progress) {\n        seaf_warning (\"Index progress not found for token %s\\n\", token);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Index progress not found\");\n        return NULL;\n    }\n\n    obj = json_object ();\n    json_object_set_int_member (obj, \"indexed\", progress->indexed);\n    json_object_set_int_member (obj, \"total\", progress->total);\n    json_object_set_int_member (obj, \"status\", progress->status);\n    json_object_set_string_member (obj, \"ret_json\", progress->ret_json);\n    ret_info = json_dumps (obj, JSON_COMPACT);\n    json_decref (obj);\n\n    /* index finished */\n    if (progress->status != 1) {\n        pthread_mutex_lock (&priv->progress_lock);\n        
g_hash_table_remove (priv->progress_store, token);\n        pthread_mutex_unlock (&priv->progress_lock);\n    }\n\n    return ret_info;\n}\n\nint\nindex_blocks_mgr_start_index (IndexBlksMgr *mgr,\n                              GList *filenames,\n                              GList *paths,\n                              const char *repo_id,\n                              const char *user,\n                              int replace_existed,\n                              gboolean ret_json,\n                              const char *canon_path,\n                              SeafileCrypt *crypt,\n                              char **task_id)\n{\n    GList *ptr = NULL;\n    char *path = NULL, *token = NULL;\n    SeafileCrypt *_crypt = NULL;\n\n    SeafRepo *repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);\n    if (!repo) {\n        seaf_warning (\"Failed to get repo %.8s.\\n\", repo_id);\n        return -1;\n    }\n    IndexBlksMgrPriv *priv = mgr->priv;\n\n    token = gen_new_token(priv->progress_store);\n    if (!token) {\n        seaf_warning (\"Failed to genarate index token for repo %.8s.\\n\", repo_id);\n        seaf_repo_unref (repo);\n        return -1;\n    }\n    if (crypt) {\n        _crypt = g_new0(SeafileCrypt, 1);\n        memcpy (_crypt, crypt, sizeof (SeafileCrypt));\n    }\n\n    *task_id = g_strdup (token);\n    IdxProgress *progress = g_new0(IdxProgress, 1);\n    progress->status = 1;\n\n    IndexPara *idx_para = g_new0 (IndexPara, 1);\n    idx_para->filenames = g_list_copy_deep (filenames, (GCopyFunc)g_strdup, NULL);\n    idx_para->paths = g_list_copy_deep (paths, (GCopyFunc)g_strdup, NULL);\n    idx_para->repo = repo;\n    idx_para->user = g_strdup (user);\n    idx_para->canon_path = g_strdup(canon_path);\n    idx_para->replace_existed = replace_existed;\n    idx_para->ret_json = ret_json;\n    idx_para->crypt = _crypt;\n    idx_para->progress = progress;\n\n    progress->status = 1;\n    progress->expire_ts = time(NULL) + 
PROGRESS_TTL;\n\n    /* Get total size of all files for progress. */\n    for (ptr = paths; ptr; ptr = ptr->next) {\n        SeafStat sb;\n        path = ptr->data;\n        if (seaf_stat (path, &sb) < 0) {\n            seaf_warning (\"Bad file %s: %s.\\n\", path, strerror(errno));\n            goto error;\n        }\n\n        if (!S_ISREG(sb.st_mode))\n            goto error;\n\n        progress->total += (gint64)sb.st_size;\n    }\n\n    pthread_mutex_lock (&priv->progress_lock);\n    g_hash_table_replace (priv->progress_store, g_strdup (token), progress);\n    pthread_mutex_unlock (&priv->progress_lock);\n\n    g_thread_pool_push (priv->idx_tpool, idx_para, NULL);\n\n    g_free (token);\n    return 0;\n\nerror:\n    g_free (token);\n    /* remove temp files */\n    for (ptr = idx_para->paths; ptr; ptr = ptr->next)\n        g_unlink (ptr->data);\n\n    free_index_para (idx_para);\n    g_free (progress);\n\n    return -1;\n}\n\nstatic char *\ngen_new_token (GHashTable *token_hash)\n{\n    char uuid[37];\n    char *token;\n\n    while (1) {\n        gen_uuid_inplace (uuid);\n        token = g_strndup(uuid, TOKEN_LEN);\n\n        /* Make sure the new token doesn't conflict with an existing one. */\n        if (g_hash_table_lookup (token_hash, token) != NULL)\n            g_free (token);\n        else\n            return token;\n    }\n}\n"
  },
  {
    "path": "server/index-blocks-mgr.h",
    "content": "#ifndef INDEX_BLOCKS_MGR_H\n#define INDEX_BLOCKS_MGR_H\n\n#include \"seafile-object.h\"\n\nstruct IndexBlksMgrPriv;\nstruct _SeafileSession;\n\ntypedef struct IndexBlksMgr {\n    struct IndexBlksMgrPriv *priv;\n} IndexBlksMgr;\n\ntypedef struct IdxProgress {\n    gint64 indexed;\n    gint64 total;\n    int status; /* 0: finished, -1: error, 1: indexing */\n    char *ret_json;\n    gint64 expire_ts;\n} IdxProgress;\n\nIndexBlksMgr *\nindex_blocks_mgr_new (struct _SeafileSession *session);\n\nchar *\nindex_blocks_mgr_query_progress (IndexBlksMgr *mgr,\n                                 const char *token,\n                                 GError **error);\n\nint\nindex_blocks_mgr_start_index (IndexBlksMgr *mgr,\n                              GList *filenames,\n                              GList *paths,\n                              const char *repo_id,\n                              const char *user,\n                              int replace_existed,\n                              gboolean ret_json,\n                              const char *canon_path,\n                              SeafileCrypt *crypt,\n                              char **task_id);\n\n#endif\n"
  },
  {
    "path": "server/metric-mgr.c",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#include \"common.h\"\n\n#include \"utils.h\"\n#include \"log.h\"\n\n#include <string.h>\n#include <jansson.h>\n\n#include \"seafile-session.h\"\n#include \"metric-mgr.h\"\n#include \"obj-cache.h\"\n\n#define PUBLISH_INTERVAL 30 /* 30 seconds*/\n#define REDIS_CHANNEL \"metric_channel\"\n#define COMPONENT_NAME \"fileserver\"\n\nstruct _SeafMetricManagerPriv {\n    int in_flight_request_count;\n\n    struct ObjCache *cache;\n};\n\nSeafMetricManager* \nseaf_metric_manager_new (struct _SeafileSession *seaf)\n{\n    SeafMetricManager *mgr = g_new0 (SeafMetricManager, 1);\n\n    mgr->priv = g_new0 (SeafMetricManagerPriv, 1);\n    mgr->seaf = seaf;\n\n    // redis cache\n    mgr->priv->cache = seaf->obj_cache;\n\n    return mgr;\n}\n\nstatic void *\npublish_metrics (void *data);\n\nint\nseaf_metric_manager_start (SeafMetricManager *mgr)\n{\n    pthread_t tid;\n    int rc;\n\n    rc = pthread_create (&tid, NULL, publish_metrics, mgr);\n    if (rc != 0) {\n        seaf_warning (\"Failed to create publish metrics worker thread: %s.\\n\",\n                      strerror(rc));\n        return -1;\n    }\n\n    return 0;\n}\n\nvoid\nseaf_metric_manager_in_flight_request_inc (SeafMetricManager *mgr)\n{\n    SeafMetricManagerPriv *priv = mgr->priv;\n\n    g_atomic_int_inc (&priv->in_flight_request_count);\n}\n\nvoid\nseaf_metric_manager_in_flight_request_dec (SeafMetricManager *mgr)\n{\n    SeafMetricManagerPriv *priv = mgr->priv;\n    g_atomic_int_dec_and_test (&priv->in_flight_request_count);\n}\n\nstatic int\npublish_redis_msg (SeafMetricManager *mgr, const char *msg)\n{\n    SeafMetricManagerPriv *priv = mgr->priv;\n\n    if (!priv->cache) {\n        return 0;\n    }\n\n    int ret = objcache_publish (priv->cache, REDIS_CHANNEL, msg); \n\n    return ret;\n}\n\nstatic int\npublish_in_flight_request (SeafMetricManager *mgr)\n{\n    int ret = 0;\n    json_t *obj = NULL;\n    char 
*msg = NULL;\n    SeafMetricManagerPriv *priv = mgr->priv;\n\n    obj = json_object ();\n\n    json_object_set_new (obj, \"metric_name\", json_string(\"in_flight_request_total\"));\n    json_object_set_new (obj, \"metric_value\", json_integer (priv->in_flight_request_count));\n    json_object_set_new (obj, \"metric_type\", json_string(\"gauge\"));\n    json_object_set_new (obj, \"component_name\", json_string(COMPONENT_NAME));\n    json_object_set_new (obj, \"metric_help\", json_string(\"The number of currently running http requests.\"));\n    json_object_set_new (obj, \"node_name\", json_string(seaf->node_name));\n\n    msg = json_dumps (obj, JSON_COMPACT);\n\n    ret = publish_redis_msg (mgr, msg);\n\n    json_decref (obj);\n    g_free (msg);\n    return ret;\n}\n\nstatic void\ndo_publish_metrics (SeafMetricManager *mgr)\n{\n    int rc;\n\n    // Don't publish metrics when use go fileserver.\n    if (seaf->go_fileserver) {\n        return;\n    }\n\n    rc = publish_in_flight_request (mgr);\n    if (rc < 0) {\n        seaf_warning (\"Failed to publish in flight request\\n\");\n        return;\n    }\n}\n\nstatic void *\npublish_metrics (void *data)\n{\n    SeafMetricManager *mgr = data;\n\n    while (1) {\n        do_publish_metrics (mgr);\n        sleep(PUBLISH_INTERVAL);\n    }\n\n    return NULL;\n}\n"
  },
  {
    "path": "server/metric-mgr.h",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#ifndef SEAF_METRIC_MGR_H\n#define SEAF_METRIC_MGR_H\n\nstruct _SeafMetricManager;\n\ntypedef struct _SeafMetricManager SeafMetricManager;\n\ntypedef struct _SeafMetricManagerPriv SeafMetricManagerPriv;\n\nstruct _SeafMetricManager {\n    struct _SeafileSession *seaf;\n\n    SeafMetricManagerPriv *priv;\n};\n\nSeafMetricManager* \nseaf_metric_manager_new (struct _SeafileSession *seaf);\n\nint\nseaf_metric_manager_start (SeafMetricManager *mgr);\n\nvoid\nseaf_metric_manager_in_flight_request_inc (SeafMetricManager *mgr);\n\nvoid\nseaf_metric_manager_in_flight_request_dec (SeafMetricManager *mgr);\n\n#endif\n"
  },
  {
    "path": "server/notif-mgr.c",
    "content": "#include \"common.h\"\n\n#include <pthread.h>\n#include <jansson.h>\n\n#include <timer.h>\n#include <jwt.h>\n\n#include \"seafile-session.h\"\n#include \"http-tx-mgr.h\"\n#include \"notif-mgr.h\"\n\n#include \"utils.h\"\n#include \"seafile-error.h\"\n\n#include \"log.h\"\n\n#define NOTIF_TIMEOUT_SEC 1\n#define JWT_TOKEN_EXPIRE_TIME 300 /* 5 minutes */\n\nstruct _NotifPriv {\n    char *notif_url;\n\n    ConnectionPool *connection_pool;\n};\ntypedef struct _NotifPriv NotifPriv;\n\ntypedef struct Event {\n    NotifPriv *priv;\n    char *msg;\n} Event;\n\nNotifManager *\nseaf_notif_manager_new (struct _SeafileSession *seaf, char *url)\n{\n    NotifManager *mgr = g_new0 (NotifManager, 1);\n    mgr->seaf = seaf;\n\n    NotifPriv *priv = g_new0 (NotifPriv, 1);\n\n    priv->connection_pool = connection_pool_new ();\n    if (!priv->connection_pool) {\n        g_free (priv);\n        g_free (mgr);\n        return NULL;\n    }\n\n    priv->notif_url = url;\n    mgr->priv = priv;\n\n    return mgr;\n}\n\nstatic char *\ngen_jwt_token ()\n{\n    char *jwt_token = NULL;\n    gint64 now = (gint64)time(NULL);\n\n    jwt_t *jwt = NULL;\n\n    if (!seaf->notif_server_private_key) {\n        seaf_warning (\"No private key is configured for generating jwt token\\n\");\n        return NULL;\n    }\n\n    int ret = jwt_new (&jwt);\n    if (ret != 0 || jwt == NULL) {\n        seaf_warning (\"Failed to create jwt\\n\");\n        goto out;\n    }\n\n    ret = jwt_add_grant_int (jwt, \"exp\", now + JWT_TOKEN_EXPIRE_TIME);\n    if (ret != 0) {\n        seaf_warning (\"Failed to expire time to jwt\\n\");\n        goto out;\n    }\n    ret = jwt_set_alg (jwt, JWT_ALG_HS256, (unsigned char *)seaf->notif_server_private_key, strlen(seaf->notif_server_private_key));\n    if (ret != 0) {\n        seaf_warning (\"Failed to set alg\\n\");\n        goto out;\n    }\n\n    jwt_token = jwt_encode_str (jwt);\n\nout:\n    jwt_free (jwt);\n    return jwt_token;\n}\n\nstatic void*\nsend_event 
(void *data)\n{\n    Event *event= data;\n    NotifPriv *priv = event->priv;\n    Connection *conn = NULL;\n    int rsp_status;\n    char *req_url = NULL;\n    char *jwt_token = NULL;\n\n    jwt_token = gen_jwt_token ();\n    if (!jwt_token) {\n        return event;\n    }\n\n    conn = connection_pool_get_connection (priv->connection_pool);\n    if (!conn) {\n        g_free (jwt_token);\n        seaf_warning (\"Failed to get connection: out of memory.\\n\");\n        return event;\n    }\n\n    req_url = g_strdup_printf (\"%s/events\", priv->notif_url);\n\n    int ret;\n\n    ret = http_post (conn, req_url, jwt_token, event->msg, strlen (event->msg),\n                     &rsp_status, NULL, NULL, TRUE, NOTIF_TIMEOUT_SEC);\n    if (ret < 0) {\n        goto out;\n    }\n\n    if (rsp_status != HTTP_OK) {\n        seaf_warning (\"Failed to send event to notification server %s: %d.\\n\",\n                      priv->notif_url, rsp_status);\n    }\n\nout:\n    g_free (jwt_token);\n    g_free (req_url);\n    connection_pool_return_connection (priv->connection_pool, conn);\n\n    return event;\n}\n\nstatic void\nfree_send_event(void *data)\n{\n    if (!data)\n        return;\n\n    Event *event= data;\n\n    if (event->msg)\n        g_free (event->msg);\n\n    g_free (event);\n}\n\nvoid\nseaf_notif_manager_send_event (NotifManager *mgr, const char *msg)\n{\n    Event *event = g_new0 (Event, 1);\n    event->priv = mgr->priv;\n    event->msg = g_strdup (msg);\n\n    ccnet_job_manager_schedule_job (seaf->job_mgr,\n                                    send_event,\n                                    free_send_event,\n                                    event);\n\n}\n"
  },
  {
    "path": "server/notif-mgr.h",
    "content": "#ifndef HTTP_NOTIFICATION_MGR_H\n#define HTTP_NOTIFICATION_MGR_H\n\nstruct _NotifManager {\n    struct _SeafileSession   *seaf;\n\n    struct _NotifPriv *priv;\n};\n\ntypedef struct _NotifManager NotifManager;\n\nNotifManager *\nseaf_notif_manager_new (struct _SeafileSession *seaf, char *url);\n\nvoid\nseaf_notif_manager_send_event (NotifManager *mgr,\n                               const char *msg);\n\n#endif\n"
  },
  {
    "path": "server/pack-dir.c",
    "content": "#include \"common.h\"\n#ifdef HAVE_EVHTP\n\n#define DEBUG_FLAG SEAFILE_DEBUG_HTTP\n#include \"log.h\"\n\n#include \"seafile-object.h\"\n#include \"seafile-crypt.h\"\n#include \"seafile-error.h\"\n\n#include \"utils.h\"\n\n#include \"seafile-session.h\"\n#include \"pack-dir.h\"\n#include \"seaf-utils.h\"\n\n#include <archive.h>\n#include <archive_entry.h>\n#include <iconv.h>\n\n#ifdef WIN32\n#define S_IFLNK    0120000 /* Symbolic link */\n#define S_ISLNK(x) (((x) & S_IFMT) == S_IFLNK)\n#endif\n\n\ntypedef struct {\n    struct archive *a;\n    SeafileCrypt *crypt;\n    const char *top_dir_name;\n    gboolean is_windows;\n    time_t mtime;\n    char store_id[37];\n    int repo_version;\n    int tmp_fd;\n    char *tmp_zip_file;\n} PackDirData;\n\nstatic char *\ndo_iconv (char *fromcode, char *tocode, char *in)\n{\n    iconv_t conv;\n    size_t inlen, outlen, len;\n    char out[1024];\n    char *pin = in;\n    char *pout = out;\n    \n    conv = iconv_open (tocode, fromcode);\n    if (conv == (iconv_t)-1) {\n        return NULL;\n    }\n\n    inlen = strlen (in);\n    outlen = sizeof(out);\n\n    len = iconv (conv, &pin, &inlen, &pout, &outlen);\n    iconv_close (conv);\n\n    if (len == -1) {\n        return NULL;\n    }\n\n    outlen = sizeof(out) - outlen;\n\n    return g_strndup(out, outlen);\n}\n\nstatic int\nadd_file_to_archive (PackDirData *data,\n                     const char *parent_dir,\n                     const char *base_name,\n                     SeafDirent *dent)\n{\n    struct archive *a = data->a;\n    struct SeafileCrypt *crypt = data->crypt;\n    gboolean is_windows = data->is_windows;\n    const char *top_dir_name = data->top_dir_name;\n    \n    struct archive_entry *entry = NULL;\n    Seafile *file = NULL;\n    char *pathname = NULL;\n    char buf[64 * 1024];\n    int len = 0;\n    int n = 0;\n    int idx = 0;\n    BlockHandle *handle = NULL;\n    BlockMetadata *bmd = NULL;\n    char *blk_id = NULL;\n    uint32_t remain = 0;\n   
 EVP_CIPHER_CTX *ctx;\n    gboolean enc_init = FALSE;\n    char *dec_out = NULL;\n    int dec_out_len = -1;\n    int ret = 0;\n\n    pathname = g_build_filename (top_dir_name, parent_dir, base_name, NULL);\n\n    file = seaf_fs_manager_get_seafile (seaf->fs_mgr,\n                                        data->store_id, data->repo_version,\n                                        dent->id);\n    if (!file) {\n        ret = -1;\n        goto out;\n    }\n\n    entry = archive_entry_new ();\n\n    /* File name fixup for WinRAR */\n    if (is_windows && seaf->http_server->windows_encoding) {\n        char *win_file_name = do_iconv (\"UTF-8\",\n                                        seaf->http_server->windows_encoding,\n                                        pathname);\n        if (!win_file_name) {\n            seaf_warning (\"Failed to convert file name to %s\\n\",\n                          seaf->http_server->windows_encoding);\n            ret = -1;\n            goto out;\n        }\n        archive_entry_copy_pathname (entry, win_file_name);\n        g_free (win_file_name);\n\n    } else {\n        archive_entry_set_pathname (entry, pathname);\n    }\n\n    /* FIXME: 0644 should be set when upload files in repo-mgr.c */\n    archive_entry_set_mode (entry, dent->mode | 0644);\n    archive_entry_set_size (entry, file->file_size);\n    archive_entry_set_mtime (entry, data->mtime, 0);\n\n    n = archive_write_header (a, entry);\n    if (n != ARCHIVE_OK) {\n        seaf_warning (\"archive_write_header  error: %s\\n\", archive_error_string(a));\n        ret = -1;\n        goto out;\n    }\n\n    /* Read data of this entry block by block */\n    while (idx < file->n_blocks) {\n        blk_id = file->blk_sha1s[idx];\n        handle = seaf_block_manager_open_block (seaf->block_mgr,\n                                                data->store_id,\n                                                data->repo_version,\n                                                blk_id, 
BLOCK_READ);\n        if (!handle) {\n            seaf_warning (\"Failed to open block %s:%s\\n\", data->store_id, blk_id);\n            ret = -1;\n            goto out;\n        }\n\n        bmd = seaf_block_manager_stat_block_by_handle (seaf->block_mgr,\n                                                       handle);\n        if (!bmd) {\n            seaf_warning (\"Failed to stat block %s:%s\\n\", data->store_id, blk_id);\n            ret = -1;\n            goto out;\n        }\n        remain = bmd->size;\n        g_free (bmd);\n\n        if (crypt) {\n            if (seafile_decrypt_init (&ctx, crypt->version,\n                                      crypt->key, crypt->iv) < 0) {\n                seaf_warning (\"Failed to init decrypt.\\n\");\n                ret = -1;\n                goto out;\n            }\n            enc_init = TRUE;\n        }\n\n        while (remain != 0) {\n            n = seaf_block_manager_read_block (seaf->block_mgr, handle,\n                                               buf, sizeof(buf));\n            if (n <= 0) {\n                seaf_warning (\"failed to read block %s\\n\", blk_id);\n                ret = -1;\n                goto out;\n            }\n            remain -= n;\n\n            /* OK, we've read some data of this block  */\n            if (crypt == NULL) {\n                /* not encrypted */\n                len = archive_write_data (a, buf, n);\n                if (len <= 0) {\n                    seaf_warning (\"archive_write_data error: %s\\n\", archive_error_string(a));\n                    ret = -1;\n                    goto out;\n                }\n\n            } else {\n                /* an encrypted block */\n                dec_out = g_new (char, n + 16);\n                if (!dec_out) {\n                    seaf_warning (\"Failed to alloc memory.\\n\");\n                    ret = -1;\n                    goto out;\n                }\n\n                int r = EVP_DecryptUpdate (ctx,\n                   
                        (unsigned char *)dec_out,\n                                           &dec_out_len,\n                                           (unsigned char *)buf,\n                                           n);\n\n                /* EVP_DecryptUpdate returns 1 on success, 0 on failure */\n                if (r != 1) {\n                    seaf_warning (\"Decrypt block %s failed.\\n\", blk_id);\n                    ret = -1;\n                    goto out;\n                }\n\n                if (dec_out_len > 0) {\n                    len = archive_write_data (a, dec_out, dec_out_len);\n                    if (len <= 0) {\n                        seaf_warning (\"archive_write_data error: %s\\n\", archive_error_string(a));\n                        ret = -1;\n                        goto out;\n                    }\n                }\n\n                /* If it's the last piece of a block, call decrypt_final()\n                 * to decrypt the possible partial block. */\n                if (remain == 0) {\n                    r = EVP_DecryptFinal_ex (ctx,\n                                             (unsigned char *)dec_out,\n                                             &dec_out_len);\n                    if (r != 1) {\n                        seaf_warning (\"Decrypt block %s failed.\\n\", blk_id);\n                        ret = -1;\n                        goto out;\n                    }\n\n                    if (dec_out_len != 0) {\n                        len = archive_write_data (a, dec_out, dec_out_len);\n                        if (len <= 0) {\n                            seaf_warning (\"archive_write_data error: %s\\n\", archive_error_string(a));\n                            ret = -1;\n                            goto out;\n                        }\n                    }\n                }\n\n                g_free (dec_out);\n                dec_out = NULL;\n            }\n        }\n\n        seaf_block_manager_close_block (seaf->block_mgr, 
handle);\n        seaf_block_manager_block_handle_free (seaf->block_mgr, handle);\n        handle = NULL;\n\n        /* turn to next block */\n        idx++;\n    }\n\nout:\n    g_free (pathname);\n    if (entry)\n        archive_entry_free (entry);\n    if (file)\n        seafile_unref (file);\n    if (handle) {\n        seaf_block_manager_close_block (seaf->block_mgr, handle);\n        seaf_block_manager_block_handle_free(seaf->block_mgr, handle);\n    }\n    if (crypt != NULL && enc_init)\n        EVP_CIPHER_CTX_free (ctx);\n    g_free (dec_out);\n\n    return ret;\n}\n\nstatic int\narchive_dir (PackDirData *data,\n             const char *root_id,\n             const char *dirpath,\n             Progress *progress)\n{\n    SeafDir *dir = NULL;\n    SeafDirent *dent;\n    GList *ptr;\n    char *subpath = NULL;\n    int ret = 0;\n\n    dir = seaf_fs_manager_get_seafdir (seaf->fs_mgr,\n                                       data->store_id, data->repo_version,\n                                       root_id);\n    if (!dir) {\n        seaf_warning (\"failed to get dir %s:%s\\n\", data->store_id, root_id);\n        goto out;\n    }\n    if (!dir->entries) {\n        char *pathname = g_build_filename (data->top_dir_name, dirpath, NULL);\n        struct archive_entry *entry = archive_entry_new ();\n        gboolean is_windows = data->is_windows;\n\n        if (is_windows && seaf->http_server->windows_encoding) {\n            char *win_file_name = do_iconv (\"UTF-8\",\n                    seaf->http_server->windows_encoding,\n                    pathname);\n            if (!win_file_name) {\n                seaf_warning (\"Failed to convert file name to %s\\n\",\n                              seaf->http_server->windows_encoding);\n                ret = -1;\n                goto out;\n            }\n            archive_entry_copy_pathname (entry, win_file_name);\n            g_free (win_file_name);\n\n        } else {\n            archive_entry_set_pathname (entry, 
 pathname);\n        }\n\n        archive_entry_set_filetype (entry, AE_IFDIR);\n        archive_entry_set_mtime (entry, data->mtime, 0);\n        archive_entry_set_perm (entry, 0755);\n        int n = archive_write_header (data->a, entry);\n        if (n != ARCHIVE_OK) {\n            seaf_warning (\"archive_write_header  error: %s\\n\", archive_error_string(data->a));\n            ret = -1;\n        }\n\n        archive_entry_free (entry);\n        g_free (pathname);\n        goto out;\n    }\n\n    for (ptr = dir->entries; ptr; ptr = ptr->next) {\n        if (progress->canceled) {\n            ret = -1;\n            goto out;\n        }\n\n        dent = ptr->data;\n        if (S_ISREG(dent->mode)) {\n            ret = add_file_to_archive (data, dirpath, dent->name, dent);\n            if (ret == 0) {\n                g_atomic_int_inc (&progress->zipped);\n            }\n        } else if (S_ISLNK(dent->mode)) {\n            if (archive_version_number() >= 3000001) {\n                /* Symlink in zip archive is not supported in earlier versions\n                 * of libarchive */\n                ret = add_file_to_archive (data, dirpath, dent->name, dent);\n            }\n\n        } else if (S_ISDIR(dent->mode)) {\n            subpath = g_build_filename (dirpath, dent->name, NULL);\n            ret = archive_dir (data, dent->id, subpath, progress);\n            g_free (subpath);\n        }\n\n        if (ret < 0) {\n            goto out;\n        }\n    }\n\nout:\n    if (dir)\n        seaf_dir_free (dir);\n\n    return ret;\n}\n\nstatic PackDirData *\npack_dir_data_new (const char *store_id,\n                   int repo_version,\n                   const char *dirname,\n                   SeafileCrypt *crypt,\n                   gboolean is_windows)\n{\n    struct archive *a = NULL;\n    char *tmpfile_name = NULL ;\n    int fd = -1;\n    PackDirData *data = NULL;\n\n    tmpfile_name = g_strdup_printf (\"%s/seafile-XXXXXX.zip\",\n                                  
  seaf->http_server->http_temp_dir);\n    fd = g_mkstemp (tmpfile_name);\n    if (fd < 0) {\n        seaf_warning (\"Failed to open temp file: %s.\\n\", strerror (errno));\n        g_free (tmpfile_name);\n        return NULL;\n    }\n\n    a = archive_write_new ();\n    archive_write_add_filter_none (a);\n    archive_write_set_format_zip (a);\n    archive_write_open_fd (a, fd);\n\n    data = g_new0 (PackDirData, 1);\n    data->crypt = crypt;\n    data->is_windows = is_windows;\n    data->a = a;\n    data->top_dir_name = dirname;\n    data->mtime = time(NULL);\n    memcpy (data->store_id, store_id, 36);\n    data->repo_version = repo_version;\n    data->tmp_fd = fd;\n    data->tmp_zip_file = tmpfile_name;\n\n    return data;\n}\n\nstatic gboolean\nname_exists (GList *file_list, const char *filename)\n{\n    GList *ptr;\n    char *name;\n\n    for (ptr = file_list; ptr != NULL; ptr = ptr->next) {\n        name = ptr->data;\n        if (strcmp (name, filename) == 0)\n            return TRUE;\n    }\n\n    return FALSE;\n}\n\nstatic char *\ngenerate_unique_filename (const char *file, GList *file_list)\n{\n    int i = 1;\n    char *name, *ext, *unique_name;\n\n    unique_name = g_strdup(file);\n    split_filename (unique_name, &name, &ext);\n    while (name_exists (file_list, unique_name)) {\n        g_free (unique_name);\n        if (ext)\n            unique_name = g_strdup_printf (\"%s (%d).%s\", name, i, ext);\n        else\n            unique_name = g_strdup_printf (\"%s (%d)\", name, i);\n        i++;\n    }\n\n    g_free (name);\n    g_free (ext);\n\n    return unique_name;\n}\n\nstatic int\narchive_multi (PackDirData *data, GList *dirent_list,\n               Progress *progress)\n{\n    GList *iter;\n    SeafDirent *dirent;\n    GList *file_list = NULL;\n\n    for (iter = dirent_list; iter; iter = iter->next) {\n        char *unique_name = NULL;\n        if (progress->canceled) {\n            string_list_free (file_list);\n            return -1;\n        }\n      
  dirent = iter->data;\n        if (S_ISREG(dirent->mode)) {\n            unique_name = generate_unique_filename (dirent->name, file_list);\n            file_list = g_list_prepend (file_list, unique_name);\n            if (add_file_to_archive (data, \"\", unique_name, dirent) < 0) {\n                string_list_free (file_list);\n                seaf_warning (\"Failed to archive file: %s.\\n\", dirent->name);\n                return -1;\n            }\n            g_atomic_int_inc (&progress->zipped);\n        } else if (S_ISDIR(dirent->mode)) {\n            unique_name = generate_unique_filename (dirent->name, file_list);\n            file_list = g_list_prepend (file_list, unique_name);\n            if (archive_dir (data, dirent->id, unique_name, progress) < 0) {\n                string_list_free (file_list);\n                seaf_warning (\"Failed to archive dir: %s.\\n\", dirent->name);\n                return -1;\n            }\n        }\n    }\n\n    string_list_free (file_list);\n    return 0;\n}\n\nint\npack_files (const char *store_id,\n            int repo_version,\n            const char *dirname,\n            void *internal,\n            SeafileCrypt *crypt,\n            gboolean is_windows,\n            Progress *progress)\n{\n    int ret = 0;\n    PackDirData *data = NULL;\n\n    data = pack_dir_data_new (store_id, repo_version, dirname,\n                              crypt, is_windows);\n    if (!data) {\n        seaf_warning (\"Failed to create pack dir data for %s.\\n\",\n                      strcmp (dirname, \"\")==0 ? 
\"multi files\" : dirname);\n        return -1;\n    }\n\n    progress->zip_file_path = data->tmp_zip_file;\n\n    if (strcmp (dirname, \"\") != 0) {\n        // Pack dir\n        if (archive_dir (data, (char *)internal, \"\", progress) < 0) {\n            if (progress->canceled)\n                seaf_warning (\"Zip task for dir %s in repo %.8s canceled.\\n\", dirname, store_id);\n            else\n                seaf_warning (\"Failed to archive dir %s in repo %.8s.\\n\", dirname, store_id);\n            ret = -1;\n        }\n    } else {\n        // Pack multi\n        if (archive_multi (data, (GList *)internal, progress) < 0) {\n            if (progress->canceled)\n                seaf_warning (\"Archiving multi files in repo %.8s canceled.\\n\", store_id);\n            else\n                seaf_warning (\"Failed to archive multi files in repo %.8s.\\n\", store_id);\n            ret = -1;\n        }\n    }\n\n    if (archive_write_free (data->a) < 0) {\n        seaf_warning (\"Failed to archive write finish for %s in repo %.8s.\\n\",\n                      strcmp (dirname, \"\")==0 ? \"multi files\" : dirname, store_id);\n        ret = -1;\n    }\n\n    close (data->tmp_fd);\n    free (data);\n\n    return ret;\n}\n#endif\n"
  },
  {
    "path": "server/pack-dir.h",
    "content": "#ifndef PACK_DIR_H\n#define PACK_DIR_H\n#ifdef HAVE_EVHTP\n\n/* Pack a seafile directory to a zipped archive, saved in a temporary file.\n   Return the path of this temporary file.\n */\n\ntypedef struct Progress {\n    int zipped;\n    int total;\n    char *zip_file_path;\n    gint64 expire_ts;\n    gboolean canceled;\n    gboolean size_too_large;\n    gboolean internal_error;\n} Progress;\n\nint\npack_files (const char *store_id,\n            int repo_version,\n            const char *dirname,\n            void *internal,\n            SeafileCrypt *crypt,\n            gboolean is_windows,\n            Progress *progress);\n#endif\n\n#endif\n"
  },
  {
    "path": "server/passwd-mgr.c",
    "content": "#include \"common.h\"\n#include \"log.h\"\n\n#include <glib.h>\n#include <timer.h>\n\n#include \"seafile-session.h\"\n#include \"seafile-object.h\"\n#include \"seafile-error.h\"\n#include \"seafile-crypt.h\"\n\n#include \"utils.h\"\n\n#define REAP_INTERVAL 60\n#define REAP_THRESHOLD 3600\n\ntypedef struct {\n    int enc_version;\n    unsigned char key[32];\n    unsigned char iv[16];\n    guint64 expire_time;\n} DecryptKey;\n\nstruct _SeafPasswdManagerPriv {\n    GHashTable *decrypt_keys;\n    CcnetTimer *reap_timer;\n};\n\nstatic int reap_expired_passwd (void *vmgr);\n\nstatic void\ndecrypt_key_free (DecryptKey *key)\n{\n    if (!key) return;\n\n    /* clear sensitive information */\n    memset (key->key, 0, sizeof(key->key));\n    memset (key->iv, 0, sizeof(key->iv));\n    g_free (key);\n}\n\nSeafPasswdManager *\nseaf_passwd_manager_new (struct _SeafileSession *session)\n{\n    SeafPasswdManager *mgr = g_new0 (SeafPasswdManager, 1);\n\n    mgr->session = session;\n    mgr->priv = g_new0 (struct _SeafPasswdManagerPriv, 1);\n    mgr->priv->decrypt_keys = g_hash_table_new_full (g_str_hash, g_str_equal,\n                                                     g_free,\n                                                     (GDestroyNotify)decrypt_key_free);\n\n    return mgr;\n}\n\nint\nseaf_passwd_manager_start (SeafPasswdManager *mgr)\n{\n    mgr->priv->reap_timer = ccnet_timer_new (reap_expired_passwd,\n                                             mgr, REAP_INTERVAL * 1000);\n    return 1;\n}\n\nint\nseaf_passwd_manager_check_passwd (SeafPasswdManager *mgr,\n                                  const char *repo_id,\n                                  const char *magic,\n                                  GError **error)\n{\n    SeafRepo *repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);\n\n    if (!repo) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid repo\");\n        return -1;\n    }\n\n    if 
(!repo->encrypted) {\n        seaf_repo_unref (repo);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Repo is not encrypted\");\n        return -1;\n    }\n\n    if (strcmp (magic, repo->magic) != 0) {\n        seaf_repo_unref (repo);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Incorrect password\");\n        return -1;\n    }\n\n    seaf_repo_unref (repo);\n\n    return 0;\n}\n\nint\nseaf_passwd_manager_set_passwd (SeafPasswdManager *mgr,\n                                const char *repo_id,\n                                const char *user,\n                                const char *passwd,\n                                GError **error)\n{\n    SeafRepo *repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);\n    DecryptKey *crypt_key;\n    GString *hash_key;\n\n    if (!repo) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid repo\");\n        return -1;\n    }\n\n    if (!repo->encrypted) {\n        seaf_repo_unref (repo);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Repo is not encrypted\");\n        return -1;\n    }\n\n    if (repo->enc_version != 1 && repo->enc_version != 2 && repo->enc_version != 3 && repo->enc_version != 4) {\n        seaf_repo_unref (repo);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Unsupported encryption version\");\n        return -1;\n    }\n\n    if (repo->pwd_hash_algo) {\n        if (seafile_pwd_hash_verify_repo_passwd (repo->enc_version, repo->id, passwd, repo->salt,\n                                                 repo->pwd_hash, repo->pwd_hash_algo, repo->pwd_hash_params) < 0) {\n            seaf_repo_unref (repo);\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                         \"Incorrect password\");\n            return -1;\n        }\n    } else {\n        if 
(seafile_verify_repo_passwd (repo->id, passwd,\n                                        repo->magic,\n                                        repo->enc_version, repo->salt) < 0) {\n            seaf_repo_unref (repo);\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                         \"Incorrect password\");\n            return -1;\n        }\n    }\n\n    crypt_key = g_new0 (DecryptKey, 1);\n    if (!crypt_key) {\n        seaf_warning (\"Failed to alloc crypt key struct.\\n\");\n        seaf_repo_unref (repo);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_INTERNAL,\n                     \"Internal server error\");\n        return -1;\n    }\n\n    if (seafile_decrypt_repo_enc_key (repo->enc_version, passwd, repo->random_key, repo->salt,\n                                      crypt_key->key, crypt_key->iv) < 0) {\n        seaf_repo_unref (repo);\n        g_free (crypt_key);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Incorrect password\");\n        return -1;\n    }\n    crypt_key->expire_time = (guint64)time(NULL) + REAP_THRESHOLD;\n    crypt_key->enc_version = repo->enc_version;\n\n    hash_key = g_string_new (NULL);\n    g_string_printf (hash_key, \"%s.%s\", repo_id, user);\n\n    /* g_debug (\"[passwd mgr] Set passwd for %s\\n\", hash_key->str); */\n\n    g_hash_table_insert (mgr->priv->decrypt_keys,\n                         g_string_free (hash_key, FALSE),\n                         crypt_key);\n    seaf_repo_unref (repo);\n\n    return 0;\n}\n\nint\nseaf_passwd_manager_unset_passwd (SeafPasswdManager *mgr,\n                                  const char *repo_id,\n                                  const char *user,\n                                  GError **error)\n{\n    GString *hash_key;\n\n    hash_key = g_string_new (NULL);\n    g_string_printf (hash_key, \"%s.%s\", repo_id, user);\n    g_hash_table_remove (mgr->priv->decrypt_keys, hash_key->str);\n    g_string_free (hash_key, 
TRUE);\n\n    return 0;\n}     \n\ngboolean\nseaf_passwd_manager_is_passwd_set (SeafPasswdManager *mgr,\n                                   const char *repo_id,\n                                   const char *user)\n{\n    GString *key = g_string_new (NULL);\n    gboolean ret = FALSE;\n\n    g_string_printf (key, \"%s.%s\", repo_id, user);\n    /* g_debug (\"[passwd mgr] check passwd for %s\\n\", key->str); */\n    if (g_hash_table_lookup (mgr->priv->decrypt_keys, key->str) != NULL)\n        ret = TRUE;\n    g_string_free (key, TRUE);\n\n    return ret;\n}\n\nSeafileCryptKey *\nseaf_passwd_manager_get_decrypt_key (SeafPasswdManager *mgr,\n                                     const char *repo_id,\n                                     const char *user)\n{\n    GString *hash_key;\n    DecryptKey *crypt_key;\n    SeafileCryptKey *ret;\n    char key_hex[65], iv_hex[65];\n\n    hash_key = g_string_new (NULL);\n    g_string_printf (hash_key, \"%s.%s\", repo_id, user);\n\n    /* g_debug (\"[passwd mgr] get passwd for %s.\\n\", hash_key->str); */\n\n    crypt_key = g_hash_table_lookup (mgr->priv->decrypt_keys, hash_key->str);\n    if (!crypt_key) {\n        g_string_free (hash_key, TRUE);\n        return NULL;\n    }\n\n    if (crypt_key->enc_version >= 2) {\n        rawdata_to_hex (crypt_key->key, key_hex, 32);\n        rawdata_to_hex (crypt_key->iv, iv_hex, 16);\n    } else if (crypt_key->enc_version == 1) {\n        rawdata_to_hex (crypt_key->key, key_hex, 16);\n        rawdata_to_hex (crypt_key->iv, iv_hex, 16);\n    }\n\n    ret = seafile_crypt_key_new ();\n    g_object_set (ret, \"key\", key_hex, \"iv\", iv_hex, NULL);\n\n    g_string_free (hash_key, TRUE);\n    return ret;\n}\n\nint\nseaf_passwd_manager_get_decrypt_key_raw (SeafPasswdManager *mgr,\n                                         const char *repo_id,\n                                         const char *user,\n                                         unsigned char *key_out,\n                                  
       unsigned char *iv_out)\n{\n    GString *hash_key;\n    DecryptKey *crypt_key;\n\n    hash_key = g_string_new (NULL);\n    g_string_printf (hash_key, \"%s.%s\", repo_id, user);\n\n    crypt_key = g_hash_table_lookup (mgr->priv->decrypt_keys, hash_key->str);\n    if (!crypt_key) {\n        g_string_free (hash_key, TRUE);\n        return -1;\n    }\n    g_string_free (hash_key, TRUE);\n\n    if (crypt_key->enc_version == 1) {\n        memcpy (key_out, crypt_key->key, 16);\n        memcpy (iv_out, crypt_key->iv, 16);\n    } else if (crypt_key->enc_version >= 2) {\n        memcpy (key_out, crypt_key->key, 32);\n        memcpy (iv_out, crypt_key->iv, 16);\n    }\n\n    return 0;\n}\n\nstatic int\nreap_expired_passwd (void *vmgr)\n{\n    SeafPasswdManager *mgr = vmgr;\n    GHashTableIter iter;\n    gpointer key, value;\n    DecryptKey *crypt_key;\n    guint64 now = (guint64)time(NULL);\n\n    g_hash_table_iter_init (&iter, mgr->priv->decrypt_keys);\n    while (g_hash_table_iter_next (&iter, &key, &value)) {\n        crypt_key = value;\n        if (crypt_key->expire_time <= now) {\n            /* g_debug (\"[passwd mgr] Remove passwd for %s\\n\", (char *)key); */\n            g_hash_table_iter_remove (&iter);\n        }\n    }\n\n    return 1;\n}\n"
  },
  {
    "path": "server/passwd-mgr.h",
    "content": "#ifndef PASSWD_MGR_H\n#define PASSWD_MGR_H\n\n#include <glib.h>\n\nstruct _SeafileSession;\nstruct _SeafPasswdManagerPriv;\nstruct _SeafileCryptKey;\n\nstruct _SeafPasswdManager {\n    struct _SeafileSession *session;\n    struct _SeafPasswdManagerPriv *priv;\n};\ntypedef struct _SeafPasswdManager SeafPasswdManager;\n\nSeafPasswdManager *\nseaf_passwd_manager_new (struct _SeafileSession *session);\n\nint\nseaf_passwd_manager_start (SeafPasswdManager *mgr);\n\n/**\n * Check password @magic to access contents of @repo_id.\n * This function:\n * 1. check whether @magic is correct;\n *\n * Returns 0 if password @magic is correct, -1 otherwise.\n */\nint\nseaf_passwd_manager_check_passwd (SeafPasswdManager *mgr,\n                                  const char *repo_id,\n                                  const char *magic,\n                                  GError **error);\n/**\n * Set @passwd for @user to access contents of @repo_id.\n * This function:\n * 1. check whether @passwd is correct;\n * 2. 
calculate and store decryption key based on @passwd in memory.\n *\n * Returns 0 if @passwd is correct, -1 otherwise.\n */\nint\nseaf_passwd_manager_set_passwd (SeafPasswdManager *mgr,\n                                const char *repo_id,\n                                const char *user,\n                                const char *passwd,\n                                GError **error);\n\n/**\n * Returns 0 if successfully unset user password, -1 otherwise.\n */\nint\nseaf_passwd_manager_unset_passwd (SeafPasswdManager *mgr,\n                                  const char *repo_id,\n                                  const char *user,\n                                  GError **error);\n\n/**\n * Check whether correct passwd has been set for @user\n * to access @repo_id.\n */\ngboolean\nseaf_passwd_manager_is_passwd_set (SeafPasswdManager *mgr,\n                                   const char *repo_id,\n                                   const char *user);\n\n/**\n * Returns decryption key for @repo_id, NULL if it's not set.\n */\nstruct _SeafileCryptKey *\nseaf_passwd_manager_get_decrypt_key (SeafPasswdManager *mgr,\n                                     const char *repo_id,\n                                     const char *user);\n\nint\nseaf_passwd_manager_get_decrypt_key_raw (SeafPasswdManager *mgr,\n                                         const char *repo_id,\n                                         const char *user,\n                                         unsigned char *key_out,\n                                         unsigned char *iv_out);\n\n#endif\n"
  },
  {
    "path": "server/permission-mgr.c",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#include \"common.h\"\n\n#include \"db.h\"\n#include \"seafile-session.h\"\n#include \"permission-mgr.h\"\n\n#define PERM_DB \"perm.db\"\n\nstruct _SeafPermManagerPriv {\n    sqlite3    *db;\n};\n\nstatic int load_db (SeafPermManager *mgr);\n\nSeafPermManager *\nseaf_perm_manager_new (SeafileSession *seaf)\n{\n    SeafPermManager *mgr = g_new0 (SeafPermManager, 1);\n    mgr->priv = g_new0 (SeafPermManagerPriv, 1);\n    mgr->seaf = seaf;\n    return mgr;\n}\n\nint\nseaf_perm_manager_init (SeafPermManager *mgr)\n{\n    return load_db (mgr);\n}\n\nstatic int\nload_db (SeafPermManager *mgr)\n{\n    char *db_path = g_build_filename (mgr->seaf->seaf_dir, PERM_DB, NULL);\n    if (sqlite_open_db (db_path, &mgr->priv->db) < 0) {\n        g_critical (\"[Permission mgr] Failed to open permission db\\n\");\n        g_free (db_path);\n        g_free (mgr);\n        return -1;\n    }\n    g_free (db_path);\n\n    const char *sql;\n\n    return 0;\n}\n\n"
  },
  {
    "path": "server/permission-mgr.h",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#ifndef SEAF_PERM_MGR_H\n#define SEAF_PERM_MGR_H\n\n#include <glib.h>\n\nstruct _SeafileSession;\n\ntypedef struct _SeafPermManager SeafPermManager;\ntypedef struct _SeafPermManagerPriv SeafPermManagerPriv;\n\nstruct _SeafPermManager {\n    struct _SeafileSession *seaf;\n\n    SeafPermManagerPriv *priv;\n};\n\nSeafPermManager*\nseaf_perm_manager_new (struct _SeafileSession *seaf);\n\nint\nseaf_perm_manager_init (SeafPermManager *mgr);\n\nint\nseaf_perm_manager_set_repo_owner (SeafPermManager *mgr,\n                                  const char *repo_id,\n                                  const char *user_id);\n\nchar *\nseaf_perm_manager_get_repo_owner (SeafPermManager *mgr,\n                                  const char *repo_id);\n\n/* TODO: add start and limit. */\n/* Get repos owned by this user.\n */\nGList *\nseaf_perm_manager_get_repos_by_owner (SeafPermManager *mgr,\n                                      const char *user_id);\n\n#endif\n"
  },
  {
    "path": "server/quota-mgr.c",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#include \"common.h\"\n#define DEBUG_FLAG SEAFILE_DEBUG_OTHER\n#include \"log.h\"\n#include \"utils.h\"\n\n#include \"seafile-session.h\"\n#include \"seaf-db.h\"\n#include \"quota-mgr.h\"\n#include \"seaf-utils.h\"\n\n#define KB 1000L\n#define MB 1000000L\n#define GB 1000000000L\n#define TB 1000000000000L\n\nstatic gint64\nget_default_quota (SeafCfgManager *mgr)\n{\n    char *quota_str;\n    char *end;\n    gint64 quota_int;\n    gint64 multiplier = GB;\n    gint64 quota;\n\n    quota_str = seaf_cfg_manager_get_config_string (mgr, \"quota\", \"default\");\n    if (!quota_str)\n        return INFINITE_QUOTA;\n\n    quota_int = strtoll (quota_str, &end, 10);\n    if (quota_int == LLONG_MIN || quota_int == LLONG_MAX) {\n        seaf_warning (\"Default quota value out of range. Use unlimited.\\n\");\n        quota = INFINITE_QUOTA;\n        goto out;\n    }\n\n    if (*end != '\\0') {\n        if (strcasecmp(end, \"kb\") == 0 || strcasecmp(end, \"k\") == 0)\n            multiplier = KB;\n        else if (strcasecmp(end, \"mb\") == 0 || strcasecmp(end, \"m\") == 0)\n            multiplier = MB;\n        else if (strcasecmp(end, \"gb\") == 0 || strcasecmp(end, \"g\") == 0)\n            multiplier = GB;\n        else if (strcasecmp(end, \"tb\") == 0 || strcasecmp(end, \"t\") == 0)\n            multiplier = TB;\n        else {\n            seaf_warning (\"Invalid default quota format %s. 
Use unlimited.\\n\", quota_str);\n            quota = INFINITE_QUOTA;\n            goto out;\n        }\n    }\n\n    quota = quota_int * multiplier;\n\nout:\n    g_free (quota_str);\n    return quota;\n}\n\nSeafQuotaManager *\nseaf_quota_manager_new (struct _SeafileSession *session)\n{\n    SeafQuotaManager *mgr = g_new0 (SeafQuotaManager, 1);\n    if (!mgr)\n        return NULL;\n    mgr->session = session;\n\n    mgr->calc_share_usage = g_key_file_get_boolean (session->config,\n                                                    \"quota\", \"calc_share_usage\",\n                                                    NULL);\n\n    return mgr;\n}\n\nint\nseaf_quota_manager_init (SeafQuotaManager *mgr)\n{\n\n    if (!mgr->session->create_tables && seaf_db_type (mgr->session->db) != SEAF_DB_TYPE_PGSQL)\n        return 0;\n\n    SeafDB *db = mgr->session->db;\n    const char *sql;\n\n    switch (seaf_db_type(db)) {\n    case SEAF_DB_TYPE_PGSQL:\n        sql = \"CREATE TABLE IF NOT EXISTS UserQuota (\\\"user\\\" VARCHAR(255) PRIMARY KEY,\"\n            \"quota BIGINT)\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n        sql = \"CREATE TABLE IF NOT EXISTS UserShareQuota (\\\"user\\\" VARCHAR(255) PRIMARY KEY,\"\n            \"quota BIGINT)\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n        sql = \"CREATE TABLE IF NOT EXISTS OrgQuota (org_id INTEGER PRIMARY KEY,\"\n            \"quota BIGINT)\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n        sql = \"CREATE TABLE IF NOT EXISTS OrgUserQuota (org_id INTEGER,\"\n            \"\\\"user\\\" VARCHAR(255), quota BIGINT, PRIMARY KEY (org_id, \\\"user\\\"))\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n        break;\n    case SEAF_DB_TYPE_SQLITE:\n        sql = \"CREATE TABLE IF NOT EXISTS UserQuota (user VARCHAR(255) PRIMARY KEY,\"\n            \"quota BIGINT)\";\n        if (seaf_db_query (db, sql) < 0)\n            
return -1;\n\n        sql = \"CREATE TABLE IF NOT EXISTS UserShareQuota (user VARCHAR(255) PRIMARY KEY,\"\n            \"quota BIGINT)\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n        sql = \"CREATE TABLE IF NOT EXISTS OrgQuota (org_id INTEGER PRIMARY KEY,\"\n            \"quota BIGINT)\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n        sql = \"CREATE TABLE IF NOT EXISTS OrgUserQuota (org_id INTEGER,\"\n            \"user VARCHAR(255), quota BIGINT, PRIMARY KEY (org_id, user))\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n        break;\n    case SEAF_DB_TYPE_MYSQL:\n        sql = \"CREATE TABLE IF NOT EXISTS UserQuota (id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, \"\n            \"user VARCHAR(255),\"\n            \"quota BIGINT, UNIQUE INDEX(user)) ENGINE=INNODB\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n        sql = \"CREATE TABLE IF NOT EXISTS UserShareQuota (id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, \"\n            \"user VARCHAR(255),\"\n            \"quota BIGINT, UNIQUE INDEX(user)) ENGINE=INNODB\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n        sql = \"CREATE TABLE IF NOT EXISTS OrgQuota (id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, \"\n            \"org_id INTEGER,\"\n            \"quota BIGINT, UNIQUE INDEX(org_id)) ENGINE=INNODB\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n        sql = \"CREATE TABLE IF NOT EXISTS OrgUserQuota (id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, \"\n            \"org_id INTEGER,\"\n            \"user VARCHAR(255), quota BIGINT, UNIQUE INDEX(org_id, user))\"\n            \"ENGINE=INNODB\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n\n        break;\n    }\n\n    return 0;\n}\n\nint\nseaf_quota_manager_set_user_quota (SeafQuotaManager *mgr,\n                                   const char *user,\n                              
     gint64 quota)\n{\n    SeafDB *db = mgr->session->db;\n    if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL) {\n        gboolean exists, err;\n        int rc;\n\n        exists = seaf_db_statement_exists (db,\n                                           \"SELECT 1 FROM UserQuota WHERE \\\"user\\\"=?\",\n                                           &err, 1, \"string\", user);\n        if (err)\n            return -1;\n\n        if (exists)\n            rc = seaf_db_statement_query (db,\n                                          \"UPDATE UserQuota SET quota=? \"\n                                          \"WHERE \\\"user\\\"=?\",\n                                          2, \"int64\", quota, \"string\", user);\n        else\n            rc = seaf_db_statement_query (db,\n                                          \"INSERT INTO UserQuota (\\\"user\\\", quota) VALUES \"\n                                          \"(?, ?)\",\n                                          2, \"string\", user, \"int64\", quota);\n        return rc;\n    } else {\n        int rc;\n        rc = seaf_db_statement_query (db,\n                                      \"REPLACE INTO UserQuota (user, quota) VALUES (?, ?)\",\n                                      2, \"string\", user, \"int64\", quota);\n        return rc;\n    }\n}\n\ngint64\nseaf_quota_manager_get_user_quota (SeafQuotaManager *mgr,\n                                   const char *user)\n{\n    char *sql;\n    gint64 quota;\n\n    if (seaf_db_type(mgr->session->db) != SEAF_DB_TYPE_PGSQL)\n        sql = \"SELECT quota FROM UserQuota WHERE user=?\";\n    else\n        sql = \"SELECT quota FROM UserQuota WHERE \\\"user\\\"=?\";\n\n    quota = seaf_db_statement_get_int64 (mgr->session->db, sql,\n                                         1, \"string\", user);\n    if (quota <= 0)\n        quota = get_default_quota (seaf->cfg_mgr);\n\n    return quota;\n}\n\nint\nseaf_quota_manager_set_org_quota (SeafQuotaManager *mgr,\n                              
    int org_id,\n                                  gint64 quota)\n{\n    SeafDB *db = mgr->session->db;\n\n    if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL) {\n        gboolean exists, err;\n        int rc;\n\n        exists = seaf_db_statement_exists (db,\n                                           \"SELECT 1 FROM OrgQuota WHERE org_id=?\",\n                                           &err, 1, \"int\", org_id);\n        if (err)\n            return -1;\n\n        if (exists)\n            rc = seaf_db_statement_query (db,\n                                          \"UPDATE OrgQuota SET quota=? WHERE org_id=?\",\n                                          2, \"int64\", quota, \"int\", org_id);\n        else\n            rc = seaf_db_statement_query (db,\n                                          \"INSERT INTO OrgQuota (org_id, quota) VALUES (?, ?)\",\n                                          2, \"int\", org_id, \"int64\", quota);\n        return rc;\n    } else {\n        int rc = seaf_db_statement_query (db,\n                                          \"REPLACE INTO OrgQuota (org_id, quota) VALUES (?, ?)\",\n                                          2, \"int\", org_id, \"int64\", quota);\n        return rc;\n    }\n}\n\ngint64\nseaf_quota_manager_get_org_quota (SeafQuotaManager *mgr,\n                                  int org_id)\n{\n    char *sql;\n    gint64 quota;\n\n    sql = \"SELECT quota FROM OrgQuota WHERE org_id=?\";\n    quota = seaf_db_statement_get_int64 (mgr->session->db, sql, 1, \"int\", org_id);\n    if (quota <= 0)\n        quota = get_default_quota (seaf->cfg_mgr);\n\n    return quota;\n}\n\nint\nseaf_quota_manager_set_org_user_quota (SeafQuotaManager *mgr,\n                                       int org_id,\n                                       const char *user,\n                                       gint64 quota)\n{\n    SeafDB *db = mgr->session->db;\n    int rc;\n\n    if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL) {\n        gboolean exists, 
err;\n\n        exists = seaf_db_statement_exists (db,\n                                           \"SELECT 1 FROM OrgUserQuota \"\n                                           \"WHERE org_id=? AND \\\"user\\\"=?\",\n                                           &err, 2, \"int\", org_id, \"string\", user);\n        if (err)\n            return -1;\n\n        if (exists)\n            rc = seaf_db_statement_query (db,\n                                          \"UPDATE OrgUserQuota SET quota=?\"\n                                          \" WHERE org_id=? AND \\\"user\\\"=?\",\n                                          3, \"int64\", quota, \"int\", org_id,\n                                          \"string\", user);\n        else\n            rc = seaf_db_statement_query (db,\n                                          \"INSERT INTO OrgUserQuota (org_id, \\\"user\\\", quota) VALUES \"\n                                          \"(?, ?, ?)\",\n                                          3, \"int\", org_id, \"string\", user,\n                                          \"int64\", quota);\n        return rc;\n    } else {\n        rc = seaf_db_statement_query (db,\n                                      \"REPLACE INTO OrgUserQuota (org_id, user, quota) VALUES (?, ?, ?)\",\n                                      3, \"int\", org_id, \"string\", user, \"int64\", quota);\n        return rc;\n    }\n}\n\ngint64\nseaf_quota_manager_get_org_user_quota (SeafQuotaManager *mgr,\n                                       int org_id,\n                                       const char *user)\n{\n    char *sql;\n    gint64 quota;\n\n    if (seaf_db_type(mgr->session->db) != SEAF_DB_TYPE_PGSQL)\n        sql = \"SELECT quota FROM OrgUserQuota WHERE org_id=? AND user=?\";\n    else\n        sql = \"SELECT quota FROM OrgUserQuota WHERE org_id=? 
AND \\\"user\\\"=?\";\n\n    quota = seaf_db_statement_get_int64 (mgr->session->db, sql,\n                                         2, \"int\", org_id, \"string\", user);\n    /* return org quota if per user quota is not set. */\n    if (quota <= 0)\n        quota = seaf_quota_manager_get_org_quota (mgr, org_id);\n\n    return quota;\n}\n\nstatic void\ncount_group_members (GHashTable *user_hash, GList *members)\n{\n    GList *p;\n    CcnetGroupUser *user;\n    const char *user_name;\n    int dummy;\n\n    for (p = members; p; p = p->next) {\n        user = p->data;\n        user_name = ccnet_group_user_get_user_name (user);\n        g_hash_table_insert (user_hash, g_strdup(user_name), &dummy);\n        /* seaf_debug (\"Shared to %s.\\n\", user_name); */\n        g_object_unref (user);\n    }\n\n    g_list_free (members);\n}\n\nstatic gint\nget_num_shared_to (const char *user, const char *repo_id)\n{\n    GHashTable *user_hash;\n    int dummy;\n    GList *personal = NULL, *groups = NULL, *members = NULL, *p;\n    gint n_shared_to = -1;\n\n    /* seaf_debug (\"Computing share usage for repo %s.\\n\", repo_id); */\n\n    /* If a repo is shared to both a user and a group, and that user is also\n     * a member of the group, we don't want to count that user twice.\n     * This also applies to two groups with overlapped members.\n     * So we have to use a hash table to filter out duplicated users.\n     */\n    user_hash = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL);\n\n    /* First count personal share */\n    personal = seaf_share_manager_list_shared_to (seaf->share_mgr, user, repo_id);\n    for (p = personal; p; p = p->next) {\n        char *email = p->data;\n        g_hash_table_insert (user_hash, g_strdup(email), &dummy);\n        /* seaf_debug (\"Shared to %s.\\n\", email); */\n    }\n\n    /* Then groups... 
*/\n    groups = seaf_repo_manager_get_groups_by_repo (seaf->repo_mgr,\n                                                   repo_id, NULL);\n    for (p = groups; p; p = p->next) {\n        members = ccnet_group_manager_get_group_members (seaf->group_mgr, (int)(long)p->data, -1, -1, NULL);\n        if (!members) {\n            seaf_warning (\"Cannot get member list for groupd %d.\\n\", (int)(long)p->data);\n            goto out;\n        }\n\n        count_group_members (user_hash, members);\n    }\n\n    /* Remove myself if i'm in a group. */\n    g_hash_table_remove (user_hash, user);\n\n    n_shared_to = g_hash_table_size(user_hash);\n    /* seaf_debug (\"n_shared_to = %u.\\n\", n_shared_to); */\n\nout:\n    g_hash_table_destroy (user_hash);\n    string_list_free (personal);\n    g_list_free (groups);\n\n    return n_shared_to;\n}\n\nint\nseaf_quota_manager_check_quota_with_delta (SeafQuotaManager *mgr,\n                                           const char *repo_id,\n                                           gint64 delta)\n{\n    SeafVirtRepo *vinfo;\n    const char *r_repo_id = repo_id;\n    char *user = NULL;\n    gint64 quota, usage;\n    int ret = 0;\n\n    /* If it's a virtual repo, check quota to origin repo. 
*/\n    vinfo = seaf_repo_manager_get_virtual_repo_info (seaf->repo_mgr, repo_id);\n    if (vinfo)\n        r_repo_id = vinfo->origin_repo_id;\n\n    user = seaf_repo_manager_get_repo_owner (seaf->repo_mgr, r_repo_id);\n    if (user != NULL) {\n        if (g_strrstr (user, \"dtable@seafile\") != NULL)\n            goto out;\n        quota = seaf_quota_manager_get_user_quota (mgr, user);\n    } else {\n        seaf_warning (\"Repo %s has no owner.\\n\", r_repo_id);\n        ret = -1;\n        goto out;\n    }\n\n    if (quota == INFINITE_QUOTA)\n        goto out;\n\n    usage = seaf_quota_manager_get_user_usage (mgr, user);\n    if (usage < 0) {\n        ret = -1;\n        goto out;\n    }\n\n    if (delta != 0) {\n        usage += delta;\n    }\n    if (usage >= quota) {\n        ret = 1;\n    }\n\nout:\n    seaf_virtual_repo_info_free (vinfo);\n    g_free (user);\n    return ret;\n}\n\nint\nseaf_quota_manager_check_quota (SeafQuotaManager *mgr,\n                                const char *repo_id)\n{\n    int ret = seaf_quota_manager_check_quota_with_delta (mgr, repo_id, 0);\n\n    if (ret == 1) {\n        return -1;\n    }\n    return ret;\n}\n\ngint64\nseaf_quota_manager_get_user_usage (SeafQuotaManager *mgr, const char *user)\n{\n    char *sql;\n\n    sql = \"SELECT SUM(size) FROM \"\n        \"RepoOwner o LEFT JOIN VirtualRepo v ON o.repo_id=v.repo_id, \"\n        \"RepoSize WHERE \"\n        \"owner_id=? AND o.repo_id=RepoSize.repo_id \"\n        \"AND v.repo_id IS NULL\";\n\n    return seaf_db_statement_get_int64 (mgr->session->db, sql,\n                                        1, \"string\", user);\n\n    /* Add size of repos in trash. 
*/\n    /* sql = \"SELECT size FROM RepoTrash WHERE owner_id = ?\"; */\n    /* if (seaf_db_statement_foreach_row (mgr->session->db, sql, */\n    /*                                    get_total_size, &total, */\n    /*                                    1, \"string\", user) < 0) */\n    /*     return -1; */\n}\n\nstatic gint64\nrepo_share_usage (const char *user, const char *repo_id)\n{\n    gint n_shared_to = get_num_shared_to (user, repo_id);\n    if (n_shared_to < 0) {\n        return -1;\n    } else if (n_shared_to == 0) {\n        return 0;\n    }\n\n    gint64 size = seaf_repo_manager_get_repo_size (seaf->repo_mgr, repo_id);\n    if (size < 0) {\n        seaf_warning (\"Cannot get size of repo %s.\\n\", repo_id);\n        return -1;\n    }\n\n    /* share_usage = repo_size * n_shared_to */\n    gint64 usage = size * n_shared_to;\n\n    return usage;\n}\n\ngint64\nseaf_quota_manager_get_user_share_usage (SeafQuotaManager *mgr,\n                                         const char *user)\n{\n    GList *repos, *p;\n    char *repo_id;\n    gint64 total = 0, per_repo;\n\n    repos = seaf_repo_manager_get_repo_ids_by_owner (seaf->repo_mgr, user);\n\n    for (p = repos; p != NULL; p = p->next) {\n        repo_id = p->data;\n        per_repo = repo_share_usage (user, repo_id);\n        if (per_repo < 0) {\n            seaf_warning (\"Failed to get repo %s share usage.\\n\", repo_id);\n            string_list_free (repos);\n            return -1;\n        }\n\n        total += per_repo;\n    }\n\n    string_list_free (repos);\n    return total;\n}\n\ngint64\nseaf_quota_manager_get_org_usage (SeafQuotaManager *mgr, int org_id)\n{\n    char *sql;\n\n    sql = \"SELECT SUM(size) FROM OrgRepo, RepoSize WHERE \"\n        \"org_id=? 
AND OrgRepo.repo_id=RepoSize.repo_id\";\n\n    return seaf_db_statement_get_int64 (mgr->session->db, sql,\n                                        1, \"int\", org_id);\n}\n\ngint64\nseaf_quota_manager_get_org_user_usage (SeafQuotaManager *mgr,\n                                       int org_id,\n                                       const char *user)\n{\n    char *sql;\n\n    sql = \"SELECT SUM(size) FROM OrgRepo, RepoSize WHERE \"\n        \"org_id=? AND user = ? AND OrgRepo.repo_id=RepoSize.repo_id\";\n\n    return seaf_db_statement_get_int64 (mgr->session->db, sql,\n                                        2, \"int\", org_id, \"string\", user);\n}\n\nstatic gboolean\ncollect_user_and_usage (SeafDBRow *row, void *data)\n{\n    GList **p = data;\n    const char *user;\n    gint64 usage;\n\n    user = seaf_db_row_get_column_text (row, 0);\n    usage = seaf_db_row_get_column_int64 (row, 1);\n\n    if (!user)\n        return TRUE;\n\n    SeafileUserQuotaUsage *user_usage= g_object_new (SEAFILE_TYPE_USER_QUOTA_USAGE,\n                                                     \"user\", user,\n                                                     \"usage\", usage,\n                                                     NULL);\n    if (!user_usage)\n        return FALSE;\n\n    *p = g_list_prepend (*p, user_usage);\n\n    return TRUE;\n}\n\nGList *\nseaf_repo_quota_manager_list_user_quota_usage (SeafQuotaManager *mgr)\n{\n    GList *ret = NULL;\n    char *sql = NULL;\n\n    sql = \"SELECT owner_id,SUM(size) FROM \"\n          \"RepoOwner o LEFT JOIN VirtualRepo v ON o.repo_id=v.repo_id, \"\n          \"RepoSize WHERE \"\n          \"o.repo_id=RepoSize.repo_id \"\n          \"AND v.repo_id IS NULL \"\n          \"GROUP BY owner_id\";\n\n    if (seaf_db_statement_foreach_row (mgr->session->db, sql,\n                                       collect_user_and_usage,\n                                       &ret, 0) < 0) {\n        g_list_free_full (ret, g_object_unref);\n        return 
NULL;\n    }\n\n    return g_list_reverse (ret);\n}\n"
  },
  {
    "path": "server/quota-mgr.h",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#ifndef QUOTA_MGR_H\n#define QUOTA_MGR_H\n\n#define INFINITE_QUOTA (gint64)-2\n\nstruct _SeafQuotaManager {\n    struct _SeafileSession *session;\n\n    gboolean calc_share_usage;\n};\ntypedef struct _SeafQuotaManager SeafQuotaManager;\n\nSeafQuotaManager *\nseaf_quota_manager_new (struct _SeafileSession *session);\n\nint\nseaf_quota_manager_init (SeafQuotaManager *mgr);\n\n/* Set/get quota for a personal account. */\nint\nseaf_quota_manager_set_user_quota (SeafQuotaManager *mgr,\n                                   const char *user,\n                                   gint64 quota);\n\ngint64\nseaf_quota_manager_get_user_quota (SeafQuotaManager *mgr,\n                                   const char *user);\n\ngint64\nseaf_quota_manager_get_user_share_usage (SeafQuotaManager *mgr,\n                                         const char *user);\n\n/*\n * Check if @repo_id still has free space for upload.\n */\nint\nseaf_quota_manager_check_quota (SeafQuotaManager *mgr,\n                                const char *repo_id);\n\n// ret = 0 means doesn't exceed quota,\n// 1 means exceed quota,\n// -1 means internal error\nint\nseaf_quota_manager_check_quota_with_delta (SeafQuotaManager *mgr,\n                                           const char *repo_id,\n                                           gint64 delta);\n\ngint64\nseaf_quota_manager_get_user_usage (SeafQuotaManager *mgr, const char *user);\n\nGList *\nseaf_repo_quota_manager_list_user_quota_usage (SeafQuotaManager *mgr);\n\n#endif\n"
  },
  {
    "path": "server/repo-mgr.c",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#include \"common.h\"\n\n#include <glib/gstdio.h>\n\n#include <openssl/sha.h>\n#include <openssl/rand.h>\n\n#include <timer.h>\n#include \"utils.h\"\n#include \"log.h\"\n\n#include \"seafile-session.h\"\n#include \"commit-mgr.h\"\n#include \"branch-mgr.h\"\n#include \"repo-mgr.h\"\n#include \"fs-mgr.h\"\n#include \"seafile-error.h\"\n#include \"seafile-crypt.h\"\n#include \"password-hash.h\"\n\n#include \"seaf-db.h\"\n#include \"seaf-utils.h\"\n\n#define REAP_TOKEN_INTERVAL 300 /* 5 mins */\n#define DECRYPTED_TOKEN_TTL 3600 /* 1 hour */\n#define SCAN_TRASH_DAYS 1 /* one day */\n#define TRASH_EXPIRE_DAYS 30 /* one month */\n\ntypedef struct DecryptedToken {\n    char *token;\n    gint64 reap_time;\n} DecryptedToken;\n\nstruct _SeafRepoManagerPriv {\n    /* (encrypted_token, session_key) -> decrypted token */\n    GHashTable *decrypted_tokens;\n    pthread_rwlock_t lock;\n    CcnetTimer *reap_token_timer;\n\n    CcnetTimer *scan_trash_timer;\n};\n\nstatic void\nload_repo (SeafRepoManager *manager, SeafRepo *repo);\n\nstatic int create_db_tables_if_not_exist (SeafRepoManager *mgr);\n\nstatic int save_branch_repo_map (SeafRepoManager *manager, SeafBranch *branch);\n\nstatic int reap_token (void *data);\nstatic void decrypted_token_free (DecryptedToken *token);\n\ngboolean\nis_repo_id_valid (const char *id)\n{\n    if (!id)\n        return FALSE;\n\n    return is_uuid_valid (id);\n}\n\nSeafRepo*\nseaf_repo_new (const char *id, const char *name, const char *desc)\n{\n    SeafRepo* repo;\n\n    /* valid check */\n  \n    \n    repo = g_new0 (SeafRepo, 1);\n    memcpy (repo->id, id, 36);\n    repo->id[36] = '\\0';\n\n    repo->name = g_strdup(name);\n    repo->desc = g_strdup(desc);\n\n    repo->ref_cnt = 1;\n\n    return repo;\n}\n\nvoid\nseaf_repo_free (SeafRepo *repo)\n{\n    if (repo->name) g_free (repo->name);\n    if (repo->desc) g_free (repo->desc);\n    if (repo->head) 
seaf_branch_unref (repo->head);\n    if (repo->virtual_info)\n        seaf_virtual_repo_info_free (repo->virtual_info);\n    g_free (repo->last_modifier);\n    g_free (repo->pwd_hash_algo);\n    g_free (repo->pwd_hash_params);\n    g_free (repo->type);\n    g_free (repo);\n}\n\nvoid\nseaf_repo_ref (SeafRepo *repo)\n{\n    g_atomic_int_inc (&repo->ref_cnt);\n}\n\nvoid\nseaf_repo_unref (SeafRepo *repo)\n{\n    if (!repo)\n        return;\n\n    if (g_atomic_int_dec_and_test (&repo->ref_cnt))\n        seaf_repo_free (repo);\n}\n\nstatic void\nset_head_common (SeafRepo *repo, SeafBranch *branch)\n{\n    if (repo->head)\n        seaf_branch_unref (repo->head);\n    repo->head = branch;\n    seaf_branch_ref(branch);\n}\n\nint\nseaf_repo_set_head (SeafRepo *repo, SeafBranch *branch)\n{\n    if (save_branch_repo_map (repo->manager, branch) < 0)\n        return -1;\n    set_head_common (repo, branch);\n    return 0;\n}\n\nvoid\nseaf_repo_from_commit (SeafRepo *repo, SeafCommit *commit)\n{\n    repo->name = g_strdup (commit->repo_name);\n    repo->desc = g_strdup (commit->repo_desc);\n    repo->encrypted = commit->encrypted;\n    repo->repaired = commit->repaired;\n    repo->last_modify = commit->ctime;\n    memcpy (repo->root_id, commit->root_id, 40);\n    if (repo->encrypted) {\n        repo->enc_version = commit->enc_version;\n        if (repo->enc_version == 1 && !commit->pwd_hash_algo)\n            memcpy (repo->magic, commit->magic, 32);\n        else if (repo->enc_version == 2) {\n            memcpy (repo->random_key, commit->random_key, 96);\n        } else if (repo->enc_version == 3) {\n            memcpy (repo->random_key, commit->random_key, 96);\n            memcpy (repo->salt, commit->salt, 64);\n        } else if (repo->enc_version == 4) {\n            memcpy (repo->random_key, commit->random_key, 96);\n            memcpy (repo->salt, commit->salt, 64);\n        }\n        if (repo->enc_version >= 2 && !commit->pwd_hash_algo) {\n            memcpy (repo->magic, 
commit->magic, 64);\n        }\n        if (commit->pwd_hash_algo) {\n            memcpy (repo->pwd_hash, commit->pwd_hash, 64);\n            repo->pwd_hash_algo = g_strdup (commit->pwd_hash_algo);\n            repo->pwd_hash_params = g_strdup (commit->pwd_hash_params);\n        }\n    }\n    repo->no_local_history = commit->no_local_history;\n    repo->version = commit->version;\n    repo->last_modifier = g_strdup (commit->creator_name);\n}\n\nvoid\nseaf_repo_to_commit (SeafRepo *repo, SeafCommit *commit)\n{\n    commit->repo_name = g_strdup (repo->name);\n    commit->repo_desc = g_strdup (repo->desc);\n    commit->encrypted = repo->encrypted;\n    commit->repaired = repo->repaired;\n    if (commit->encrypted) {\n        commit->enc_version = repo->enc_version;\n        if (commit->enc_version == 1 && !repo->pwd_hash_algo)\n            commit->magic = g_strdup (repo->magic);\n        else if (commit->enc_version == 2) {\n            commit->random_key = g_strdup (repo->random_key);\n        } else if (commit->enc_version == 3) {\n            commit->random_key = g_strdup (repo->random_key);\n            commit->salt = g_strdup (repo->salt);\n        } else if (commit->enc_version == 4) {\n            commit->random_key = g_strdup (repo->random_key);\n            commit->salt = g_strdup (repo->salt);\n        }\n        if (commit->enc_version >= 2 && !repo->pwd_hash_algo) {\n            commit->magic = g_strdup (repo->magic);\n        }\n        if (repo->pwd_hash_algo) {\n            commit->pwd_hash = g_strdup (repo->pwd_hash);\n            commit->pwd_hash_algo = g_strdup (repo->pwd_hash_algo);\n            commit->pwd_hash_params = g_strdup (repo->pwd_hash_params);\n        }\n    }\n    commit->no_local_history = repo->no_local_history;\n    commit->version = repo->version;\n}\n\nstatic gboolean\ncollect_commit (SeafCommit *commit, void *vlist, gboolean *stop)\n{\n    GList **commits = vlist;\n\n    /* The traverse function will unref the commit, so we need 
to ref it.\n     */\n    seaf_commit_ref (commit);\n    *commits = g_list_prepend (*commits, commit);\n    return TRUE;\n}\n\nGList *\nseaf_repo_get_commits (SeafRepo *repo)\n{\n    GList *branches;\n    GList *ptr;\n    SeafBranch *branch;\n    GList *commits = NULL;\n\n    branches = seaf_branch_manager_get_branch_list (seaf->branch_mgr, repo->id);\n    if (branches == NULL) {\n        seaf_warning (\"Failed to get branch list of repo %s.\\n\", repo->id);\n        return NULL;\n    }\n\n    for (ptr = branches; ptr != NULL; ptr = ptr->next) {\n        branch = ptr->data;\n        gboolean res = seaf_commit_manager_traverse_commit_tree (seaf->commit_mgr,\n                                                                 repo->id,\n                                                                 repo->version,\n                                                                 branch->commit_id,\n                                                                 collect_commit,\n                                                                 &commits,\n                                                                 FALSE);\n        if (!res) {\n            for (ptr = commits; ptr != NULL; ptr = ptr->next)\n                seaf_commit_unref ((SeafCommit *)(ptr->data));\n            g_list_free (commits);\n            goto out;\n        }\n    }\n\n    commits = g_list_reverse (commits);\n\nout:\n    for (ptr = branches; ptr != NULL; ptr = ptr->next) {\n        seaf_branch_unref ((SeafBranch *)ptr->data);\n    }\n    return commits;\n}\n\ngboolean\nshould_ignore_file(const char *filename, void *data)\n{\n    /* GPatternSpec **spec = ignore_patterns; */\n\n    char **components = g_strsplit (filename, \"/\", -1);\n    int n_comps = g_strv_length (components);\n    int j = 0;\n    char *file_name;\n\n    for (; j < n_comps; ++j) {\n        file_name = components[j];\n        if (g_strcmp0(file_name, \"..\") == 0) {\n            g_strfreev (components);\n            return 
TRUE;\n        }\n    }\n    g_strfreev (components);\n\n    if (!g_utf8_validate (filename, -1, NULL)) {\n        seaf_warning (\"File name %s contains non-UTF8 characters, skip.\\n\", filename);\n        return TRUE;\n    }\n\n    /* Ignore file/dir if its name is too long. */\n    if (strlen(filename) >= SEAF_DIR_NAME_LEN)\n        return TRUE;\n\n    if (strchr (filename, '/'))\n        return TRUE;\n\n    return FALSE;\n}\n\nstatic gboolean\ncollect_repo_id (SeafDBRow *row, void *data);\n\nstatic int\nscan_trash (void *data)\n{\n    GList *repo_ids = NULL;\n    SeafRepoManager *mgr = seaf->repo_mgr;\n    gint64 trash_expire_interval = TRASH_EXPIRE_DAYS * 24 * 3600;\n    int expire_days = seaf_cfg_manager_get_config_int (seaf->cfg_mgr,\n                                                       \"library_trash\",\n                                                       \"expire_days\");\n    if (expire_days > 0) {\n        trash_expire_interval = expire_days * 24 * 3600;\n    }\n\n    gint64 expire_time = time(NULL) - trash_expire_interval;\n    char *sql = \"SELECT repo_id FROM RepoTrash WHERE del_time <= ?\";\n\n    int ret = seaf_db_statement_foreach_row (seaf->db, sql,\n                                             collect_repo_id, &repo_ids,\n                                             1, \"int64\", expire_time);\n    if (ret < 0) {\n        seaf_warning (\"Get expired repo from trash failed.\");\n        string_list_free (repo_ids);\n        return TRUE;\n    }\n\n    GList *iter;\n    char *repo_id;\n    for (iter=repo_ids; iter; iter=iter->next) {\n        repo_id = iter->data;\n        ret = seaf_repo_manager_del_repo_from_trash (mgr, repo_id, NULL);\n        if (ret < 0)\n            break;\n    }\n\n    string_list_free (repo_ids);\n\n    return TRUE;\n}\n\nstatic void\ninit_scan_trash_timer (SeafRepoManagerPriv *priv, GKeyFile *config)\n{\n    int scan_days;\n    GError *error = NULL;\n\n    scan_days = g_key_file_get_integer (config,\n                   
                     \"library_trash\", \"scan_days\",\n                                        &error);\n    if (error) {\n       scan_days = SCAN_TRASH_DAYS;\n       g_clear_error (&error);\n    }\n\n    priv->scan_trash_timer = ccnet_timer_new (scan_trash, NULL,\n                                              scan_days * 24 * 3600 * 1000);\n}\n\nSeafRepoManager*\nseaf_repo_manager_new (SeafileSession *seaf)\n{\n    SeafRepoManager *mgr = g_new0 (SeafRepoManager, 1);\n\n    mgr->priv = g_new0 (SeafRepoManagerPriv, 1);\n    mgr->seaf = seaf;\n\n    mgr->priv->decrypted_tokens = g_hash_table_new_full (g_str_hash, g_str_equal,\n                                                         g_free,\n                                                         (GDestroyNotify)decrypted_token_free);\n    pthread_rwlock_init (&mgr->priv->lock, NULL);\n    mgr->priv->reap_token_timer = ccnet_timer_new (reap_token, mgr,\n                                                   REAP_TOKEN_INTERVAL * 1000);\n\n    init_scan_trash_timer (mgr->priv, seaf->config);\n\n    return mgr;\n}\n\nint\nseaf_repo_manager_init (SeafRepoManager *mgr)\n{\n    /* On the server, we load repos into memory on-demand, because\n     * there are too many repos.\n     */\n    if (create_db_tables_if_not_exist (mgr) < 0) {\n        seaf_warning (\"[repo mgr] failed to create tables.\\n\");\n        return -1;\n    }\n\n    if (seaf_repo_manager_init_merge_scheduler() < 0) {\n        seaf_warning (\"Failed to init merge scheduler.\\n\");\n        return -1;\n    }\n\n    return 0;\n}\n\nint\nseaf_repo_manager_start (SeafRepoManager *mgr)\n{\n    return 0;\n}\n\nint\nseaf_repo_manager_add_repo (SeafRepoManager *manager,\n                            SeafRepo *repo)\n{\n    SeafDB *db = manager->seaf->db;\n\n    if (seaf_db_statement_query (db, \"INSERT INTO Repo (repo_id) VALUES (?)\",\n                                 1, \"string\", repo->id) < 0)\n        return -1;\n\n    repo->manager = manager;\n\n    return 
0;\n}\n\nstatic int\nadd_deleted_repo_record (SeafRepoManager *mgr, const char *repo_id)\n{\n    if (seaf_db_type(seaf->db) == SEAF_DB_TYPE_PGSQL) {\n        gboolean exists, err;\n\n        exists = seaf_db_statement_exists (seaf->db,\n                                           \"SELECT repo_id FROM GarbageRepos \"\n                                           \"WHERE repo_id=?\",\n                                           &err, 1, \"string\", repo_id);\n        if (err)\n            return -1;\n\n        if (!exists) {\n            return seaf_db_statement_query(seaf->db,\n                                           \"INSERT INTO GarbageRepos (repo_id) VALUES (?)\",\n                                           1, \"string\", repo_id);\n        }\n\n        return 0;\n    } else {\n        return seaf_db_statement_query (seaf->db,\n                                        \"REPLACE INTO GarbageRepos (repo_id) VALUES (?)\",\n                                        1, \"string\", repo_id);\n    }\n}\n\nstatic int\nadd_deleted_repo_to_trash (SeafRepoManager *mgr, const char *repo_id,\n                           SeafCommit *commit)\n{\n    char *owner = NULL;\n    int ret = -1;\n\n    owner = seaf_repo_manager_get_repo_owner (mgr, repo_id);\n    if (!owner) {\n        seaf_warning (\"Failed to get owner for repo %.8s.\\n\", repo_id);\n        goto out;\n    }\n\n    gint64 size = seaf_repo_manager_get_repo_size (mgr, repo_id);\n    if (size == -1) {\n        seaf_warning (\"Failed to get size of repo %.8s.\\n\", repo_id);\n        goto out;\n    }\n\n    ret =  seaf_db_statement_query (mgr->seaf->db,\n                                    \"INSERT INTO RepoTrash (repo_id, repo_name, head_id, \"\n                                    \"owner_id, size, org_id, del_time) \"\n                                    \"values (?, ?, ?, ?, ?, -1, ?)\", 6,\n                                    \"string\", repo_id,\n                                    \"string\", commit->repo_name,\n       
                             \"string\", commit->commit_id,\n                                    \"string\", owner,\n                                    \"int64\", size,\n                                    \"int64\", (gint64)time(NULL));\nout:\n    g_free (owner);\n\n    return ret;\n}\n\nstatic int\nremove_virtual_repo_ondisk (SeafRepoManager *mgr,\n                            const char *repo_id)\n{\n    SeafDB *db = mgr->seaf->db;\n\n    /* Remove record in repo table first.\n     * Once this is commited, we can gc the other tables later even if\n     * we're interrupted.\n     */\n    if (seaf_db_statement_query (db, \"DELETE FROM Repo WHERE repo_id = ?\",\n                                 1, \"string\", repo_id) < 0)\n        return -1;\n\n    /* remove branch */\n    GList *p;\n    GList *branch_list = \n        seaf_branch_manager_get_branch_list (seaf->branch_mgr, repo_id);\n    for (p = branch_list; p; p = p->next) {\n        SeafBranch *b = (SeafBranch *)p->data;\n        seaf_repo_manager_branch_repo_unmap (mgr, b);\n        seaf_branch_manager_del_branch (seaf->branch_mgr, repo_id, b->name);\n    }\n    seaf_branch_list_free (branch_list);\n\n    seaf_db_statement_query (db, \"DELETE FROM RepoOwner WHERE repo_id = ?\",\n                   1, \"string\", repo_id);\n\n    seaf_db_statement_query (db, \"DELETE FROM SharedRepo WHERE repo_id = ?\",\n                   1, \"string\", repo_id);\n\n    seaf_db_statement_query (db, \"DELETE FROM RepoGroup WHERE repo_id = ?\",\n                   1, \"string\", repo_id);\n\n    if (!seaf->cloud_mode) {\n        seaf_db_statement_query (db, \"DELETE FROM InnerPubRepo WHERE repo_id = ?\",\n                                 1, \"string\", repo_id);\n    }\n\n    seaf_db_statement_query (mgr->seaf->db,\n                             \"DELETE FROM RepoUserToken WHERE repo_id = ?\",\n                             1, \"string\", repo_id);\n\n    seaf_db_statement_query (mgr->seaf->db,\n                             
\"DELETE FROM RepoValidSince WHERE repo_id = ?\",\n                             1, \"string\", repo_id);\n\n    seaf_db_statement_query (mgr->seaf->db,\n                             \"DELETE FROM RepoSize WHERE repo_id = ?\",\n                             1, \"string\", repo_id);\n\n    seaf_db_statement_query (mgr->seaf->db,\n                             \"DELETE FROM RepoInfo WHERE repo_id = ?\",\n                             1, \"string\", repo_id);\n\n    /* For GC commit objects for this virtual repo. Fs and blocks are GC\n     * from the parent repo.\n     */\n    add_deleted_repo_record (mgr, repo_id);\n\n    return 0;\n}\n\nstatic gboolean\nget_branch (SeafDBRow *row, void *vid)\n{\n    char *ret = vid;\n    const char *commit_id;\n\n    commit_id = seaf_db_row_get_column_text (row, 0);\n    memcpy (ret, commit_id, 41);\n\n    return FALSE;\n}\n\nstatic SeafCommit*\nget_head_commit (SeafRepoManager *mgr, const char *repo_id, gboolean *has_err)\n{\n    char commit_id[41];\n    char *sql;\n\n    commit_id[0] = 0;\n    sql = \"SELECT commit_id FROM Branch WHERE name=? 
AND repo_id=?\";\n    if (seaf_db_statement_foreach_row (mgr->seaf->db, sql,\n                                       get_branch, commit_id,\n                                       2, \"string\", \"master\", \"string\", repo_id) < 0) {\n        *has_err = TRUE;\n        return NULL;\n    }\n\n    if (commit_id[0] == 0)\n        return NULL;\n\n    SeafCommit *head_commit = seaf_commit_manager_get_commit (seaf->commit_mgr, repo_id,\n                                                              1, commit_id);\n\n    return head_commit;\n}\n\nint\nseaf_repo_manager_del_repo (SeafRepoManager *mgr,\n                            const char *repo_id,\n                            GError **error)\n{\n    gboolean has_err = FALSE;\n\n    SeafCommit *head_commit = get_head_commit (mgr, repo_id, &has_err);\n    if (has_err) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to get head commit from db\");\n        return -1;\n    }\n    if (!head_commit) {\n        // head commit is missing, repo has beed deleted.\n        return 0;\n    }\n\n    if (add_deleted_repo_to_trash (mgr, repo_id, head_commit) < 0) {\n        // Add repo to trash failed, del repo directly\n        seaf_warning (\"Failed to add repo %.8s to trash, delete directly.\\n\",\n                      repo_id);\n    }\n\n    seaf_commit_unref (head_commit);\n\ndel_repo:\n    if (seaf_db_statement_query (mgr->seaf->db, \"DELETE FROM Repo WHERE repo_id = ?\",\n                                 1, \"string\", repo_id) < 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to delete repo from db\");\n        return -1;\n    }\n\n    /* remove branch */\n    GList *p;\n    GList *branch_list = seaf_branch_manager_get_branch_list (seaf->branch_mgr, repo_id);\n    for (p = branch_list; p; p = p->next) {\n        SeafBranch *b = (SeafBranch *)p->data;\n        seaf_repo_manager_branch_repo_unmap (mgr, b);\n        
seaf_branch_manager_del_branch (seaf->branch_mgr, repo_id, b->name);\n    }\n    seaf_branch_list_free (branch_list);\n\n    seaf_db_statement_query (mgr->seaf->db, \"DELETE FROM RepoOwner WHERE repo_id = ?\",\n                             1, \"string\", repo_id);\n\n    seaf_db_statement_query (mgr->seaf->db, \"DELETE FROM SharedRepo WHERE repo_id = ?\",\n                             1, \"string\", repo_id);\n\n    seaf_db_statement_query (mgr->seaf->db, \"DELETE FROM RepoGroup WHERE repo_id = ?\",\n                             1, \"string\", repo_id);\n\n    if (!seaf->cloud_mode) {\n        seaf_db_statement_query (mgr->seaf->db, \"DELETE FROM InnerPubRepo WHERE repo_id = ?\",\n                                 1, \"string\", repo_id);\n    }\n\n    seaf_db_statement_query (mgr->seaf->db,\n                             \"DELETE t.*, i.* FROM RepoUserToken t, \"\n                             \"RepoTokenPeerInfo i WHERE t.token=i.token AND \"\n                             \"t.repo_id=?\",\n                             1, \"string\", repo_id);\n\n    seaf_db_statement_query (mgr->seaf->db,\n                             \"DELETE FROM RepoHistoryLimit WHERE repo_id = ?\",\n                             1, \"string\", repo_id);\n\n    seaf_db_statement_query (mgr->seaf->db,\n                             \"DELETE FROM RepoValidSince WHERE repo_id = ?\",\n                             1, \"string\", repo_id);\n\n    seaf_db_statement_query (mgr->seaf->db,\n                             \"DELETE FROM RepoSize WHERE repo_id = ?\",\n                             1, \"string\", repo_id);\n\n    /* Remove virtual repos when origin repo is deleted. 
*/\n    GList *vrepos, *ptr;\n    vrepos = seaf_repo_manager_get_virtual_repo_ids_by_origin (mgr, repo_id);\n    for (ptr = vrepos; ptr != NULL; ptr = ptr->next)\n        remove_virtual_repo_ondisk (mgr, (char *)ptr->data);\n    string_list_free (vrepos);\n\n    seaf_db_statement_query (mgr->seaf->db, \"DELETE FROM RepoInfo \"\n                             \"WHERE repo_id=?\",\n                             1, \"string\", repo_id);\n\n    seaf_db_statement_query (mgr->seaf->db, \"DELETE FROM VirtualRepo \"\n                             \"WHERE repo_id=? OR origin_repo=?\",\n                             2, \"string\", repo_id, \"string\", repo_id);\n\n    if (!head_commit)\n        add_deleted_repo_record(mgr, repo_id);\n\n    return 0;\n}\n\nint\nseaf_repo_manager_del_virtual_repo (SeafRepoManager *mgr,\n                                    const char *repo_id)\n{\n    int ret = remove_virtual_repo_ondisk (mgr, repo_id);\n\n    if (ret < 0)\n        return ret;\n\n    return seaf_db_statement_query (mgr->seaf->db,\n                                    \"DELETE FROM VirtualRepo WHERE repo_id = ?\",\n                                    1, \"string\", repo_id);\n}\n\nstatic gboolean\nrepo_exists_in_db (SeafDB *db, const char *id, gboolean *db_err)\n{\n    return seaf_db_statement_exists (db,\n                                     \"SELECT repo_id FROM Repo WHERE repo_id = ?\",\n                                     db_err, 1, \"string\", id);\n}\n\ngboolean\ncreate_repo_fill_size (SeafDBRow *row, void *data)\n{\n    SeafRepo **repo = data;\n    SeafBranch *head;\n\n    const char *repo_id = seaf_db_row_get_column_text (row, 0);\n    gint64 size = seaf_db_row_get_column_int64 (row, 1);\n    const char *commit_id = seaf_db_row_get_column_text (row, 2);\n    const char *vrepo_id = seaf_db_row_get_column_text (row, 3);\n    gint64 file_count = seaf_db_row_get_column_int64 (row, 7);\n    int status = seaf_db_row_get_column_int(row, 8);\n    const char *type = 
seaf_db_row_get_column_text (row, 9);\n\n    *repo = seaf_repo_new (repo_id, NULL, NULL);\n    if (!*repo)\n        return FALSE;\n\n    if (!commit_id) {\n        (*repo)->is_corrupted = TRUE;\n        return FALSE;\n    }\n\n    (*repo)->size = size;\n    (*repo)->file_count = file_count;\n    head = seaf_branch_new (\"master\", repo_id, commit_id);\n    (*repo)->head = head;\n    (*repo)->status = status;\n\n    if (vrepo_id) {\n        const char *origin_repo_id = seaf_db_row_get_column_text (row, 4);\n        const char *origin_path = seaf_db_row_get_column_text (row, 5);\n        const char *base_commit = seaf_db_row_get_column_text (row, 6);\n\n        SeafVirtRepo *vinfo = g_new0 (SeafVirtRepo, 1);\n        memcpy (vinfo->repo_id, vrepo_id, 36);\n        memcpy (vinfo->origin_repo_id, origin_repo_id, 36);\n        vinfo->path = g_strdup(origin_path);\n        memcpy (vinfo->base_commit, base_commit, 40);\n\n        (*repo)->virtual_info = vinfo;\n        memcpy ((*repo)->store_id, origin_repo_id, 36);\n    } else {\n        memcpy ((*repo)->store_id, repo_id, 36);\n    }\n    if (type) {\n        (*repo)->type = g_strdup(type);\n    }\n\n    return TRUE;\n}\n\nstatic SeafRepo*\nget_repo_from_db (SeafRepoManager *mgr, const char *id, gboolean *db_err)\n{\n    SeafRepo *repo = NULL;\n    const char *sql;\n\n    if (seaf_db_type(mgr->seaf->db) != SEAF_DB_TYPE_PGSQL)\n        sql = \"SELECT r.repo_id, s.size, b.commit_id, \"\n            \"v.repo_id, v.origin_repo, v.path, v.base_commit, fc.file_count, i.status, i.type FROM \"\n            \"Repo r LEFT JOIN Branch b ON r.repo_id = b.repo_id \"\n            \"LEFT JOIN RepoSize s ON r.repo_id = s.repo_id \"\n            \"LEFT JOIN VirtualRepo v ON r.repo_id = v.repo_id \"\n            \"LEFT JOIN RepoFileCount fc ON r.repo_id = fc.repo_id \"\n            \"LEFT JOIN RepoInfo i on r.repo_id = i.repo_id \"\n            \"WHERE r.repo_id = ? 
AND b.name = 'master'\";\n    else\n        sql = \"SELECT r.repo_id, s.\\\"size\\\", b.commit_id, \"\n            \"v.repo_id, v.origin_repo, v.path, v.base_commit, fc.file_count, i.status FROM \"\n            \"Repo r LEFT JOIN Branch b ON r.repo_id = b.repo_id \"\n            \"LEFT JOIN RepoSize s ON r.repo_id = s.repo_id \"\n            \"LEFT JOIN VirtualRepo v ON r.repo_id = v.repo_id \"\n            \"LEFT JOIN RepoFileCount fc ON r.repo_id = fc.repo_id \"\n            \"LEFT JOIN RepoInfo i on r.repo_id = i.repo_id \"\n            \"WHERE r.repo_id = ? AND b.name = 'master'\";\n\n    int ret = seaf_db_statement_foreach_row (mgr->seaf->db, sql,\n                                             create_repo_fill_size, &repo,\n                                             1, \"string\", id);\n    if (ret < 0)\n        *db_err = TRUE;\n\n    return repo;\n}\n\nSeafRepo*\nseaf_repo_manager_get_repo (SeafRepoManager *manager, const gchar *id)\n{\n    int len = strlen(id);\n    SeafRepo *repo = NULL;\n    gboolean has_err = FALSE;\n\n    if (len >= 37)\n        return NULL;\n\n    repo = get_repo_from_db (manager, id, &has_err);\n\n    if (repo) {\n        if (repo->is_corrupted) {\n            seaf_repo_unref (repo);\n            return NULL;\n        }\n\n        load_repo (manager, repo);\n        if (repo->is_corrupted) {\n            seaf_repo_unref (repo);\n            return NULL;\n        }\n    }\n\n    return repo;\n}\n\nSeafRepo*\nseaf_repo_manager_get_repo_ex (SeafRepoManager *manager, const gchar *id)\n{\n    int len = strlen(id);\n    gboolean has_err = FALSE;\n    SeafRepo *ret = NULL;\n\n    if (len >= 37)\n        return NULL;\n\n    ret = get_repo_from_db (manager, id, &has_err);\n    if (has_err) {\n        ret = seaf_repo_new(id, NULL, NULL);\n        ret->is_corrupted = TRUE;\n        return ret;\n    }\n\n    if (ret) {\n        if (ret->is_corrupted) {\n            return ret;\n        }\n\n        load_repo (manager, ret);\n    }\n\n    return 
ret;\n}\n\ngboolean\nseaf_repo_manager_repo_exists (SeafRepoManager *manager, const gchar *id)\n{\n    gboolean db_err = FALSE;\n    return repo_exists_in_db (manager->seaf->db, id, &db_err);\n}\n\nstatic int\nsave_branch_repo_map (SeafRepoManager *manager, SeafBranch *branch)\n{\n    if (seaf_db_type(seaf->db) == SEAF_DB_TYPE_PGSQL) {\n        gboolean exists, err;\n        int rc;\n\n        exists = seaf_db_statement_exists (seaf->db,\n                                           \"SELECT repo_id FROM RepoHead WHERE repo_id=?\",\n                                           &err, 1, \"string\", branch->repo_id);\n        if (err)\n            return -1;\n\n        if (exists)\n            rc = seaf_db_statement_query (seaf->db,\n                                          \"UPDATE RepoHead SET branch_name=? \"\n                                          \"WHERE repo_id=?\",\n                                          2, \"string\", branch->name,\n                                          \"string\", branch->repo_id);\n        else\n            rc = seaf_db_statement_query (seaf->db,\n                                          \"INSERT INTO RepoHead (repo_id, branch_name) VALUES (?, ?)\",\n                                          2, \"string\", branch->repo_id,\n                                          \"string\", branch->name);\n        return rc;\n    } else {\n        return seaf_db_statement_query (seaf->db,\n                                        \"REPLACE INTO RepoHead (repo_id, branch_name) VALUES (?, ?)\",\n                                        2, \"string\", branch->repo_id,\n                                        \"string\", branch->name);\n    }\n\n    return -1;\n}\n\nint\nseaf_repo_manager_branch_repo_unmap (SeafRepoManager *manager, SeafBranch *branch)\n{\n    return seaf_db_statement_query (seaf->db,\n                                    \"DELETE FROM RepoHead WHERE branch_name = ?\"\n                                    \" AND repo_id = ?\",\n          
                          2, \"string\", branch->name,\n                                    \"string\", branch->repo_id);\n}\n\nint\nset_repo_commit_to_db (const char *repo_id, const char *repo_name, gint64 update_time,\n                       int version, gboolean is_encrypted, const char *last_modifier)\n{\n    char *sql;\n    gboolean exists = FALSE, db_err = FALSE;\n\n    sql = \"SELECT 1 FROM RepoInfo WHERE repo_id=?\";\n    exists = seaf_db_statement_exists (seaf->db, sql, &db_err, 1, \"string\", repo_id);\n    if (db_err)\n        return -1;\n\n    if (update_time == 0)\n        update_time = (gint64)time(NULL);\n\n    if (exists) {\n        sql = \"UPDATE RepoInfo SET name=?, update_time=?, version=?, is_encrypted=?, \"\n            \"last_modifier=? WHERE repo_id=?\";\n        if (seaf_db_statement_query (seaf->db, sql, 6,\n                                     \"string\", repo_name,\n                                     \"int64\", update_time,\n                                     \"int\", version,\n                                     \"int\", (is_encrypted ? 1:0),\n                                     \"string\", last_modifier,\n                                     \"string\", repo_id) < 0) {\n            seaf_warning (\"Failed to update repo info for repo %s.\\n\", repo_id);\n            return -1;\n        }    \n    } else {\n        sql = \"INSERT INTO RepoInfo (repo_id, name, update_time, version, is_encrypted, last_modifier) \"\n            \"VALUES (?, ?, ?, ?, ?, ?)\";\n        if (seaf_db_statement_query (seaf->db, sql, 6,\n                                     \"string\", repo_id,\n                                     \"string\", repo_name,\n                                     \"int64\", update_time,\n                                     \"int\", version,\n                                     \"int\", (is_encrypted ? 
1:0),\n                                     \"string\", last_modifier) < 0) {\n            seaf_warning (\"Failed to add repo info for repo %s.\\n\", repo_id);\n            return -1;\n        }\n    }\n\n    return 0;\n}\n\nstatic void\nload_repo_commit (SeafRepoManager *manager,\n                  SeafRepo *repo)\n{\n    SeafCommit *commit;\n\n    commit = seaf_commit_manager_get_commit_compatible (manager->seaf->commit_mgr,\n                                                        repo->id,\n                                                        repo->head->commit_id);\n    if (!commit) {\n        seaf_warning (\"Commit %s:%s is missing\\n\", repo->id, repo->head->commit_id);\n        repo->is_corrupted = TRUE;\n        return;\n    }\n\n    seaf_repo_from_commit (repo, commit);\n\n    seaf_commit_unref (commit);\n}\n\nstatic void\nload_repo (SeafRepoManager *manager, SeafRepo *repo)\n{\n    repo->manager = manager;\n\n    load_repo_commit (manager, repo);\n}\n\nstatic void\nload_mini_repo (SeafRepoManager *manager, SeafRepo *repo)\n{\n    repo->manager = manager;\n    SeafCommit *commit;\n\n    commit = seaf_commit_manager_get_commit_compatible (manager->seaf->commit_mgr,\n                                                        repo->id,\n                                                        repo->head->commit_id);\n    if (!commit) {\n        seaf_warning (\"Commit %s:%s is missing\\n\", repo->id, repo->head->commit_id);\n        repo->is_corrupted = TRUE;\n        return;\n    }\n\n    repo->name = g_strdup (commit->repo_name);\n    repo->encrypted = commit->encrypted;\n    repo->last_modify = commit->ctime;\n    repo->version = commit->version;\n    repo->last_modifier = g_strdup (commit->creator_name);\n\n    seaf_commit_unref (commit);\n}\n\nstatic int\ncreate_tables_mysql (SeafRepoManager *mgr)\n{\n    SeafDB *db = mgr->seaf->db;\n    char *sql;\n\n    sql = \"CREATE TABLE IF NOT EXISTS Repo (id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, \"\n          
\"repo_id CHAR(37), UNIQUE INDEX (repo_id))\"\n        \"ENGINE=INNODB\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    sql = \"CREATE TABLE IF NOT EXISTS RepoOwner (\"\n        \"id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, \"\n        \"repo_id CHAR(37), \"\n        \"owner_id VARCHAR(255),\"\n        \"UNIQUE INDEX (repo_id), INDEX (owner_id))\"\n        \"ENGINE=INNODB\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    sql = \"CREATE TABLE IF NOT EXISTS RepoGroup (id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\"\n        \"repo_id CHAR(37), \"\n        \"group_id INTEGER, user_name VARCHAR(255), permission CHAR(15), \"\n        \"UNIQUE INDEX (group_id, repo_id), \"\n        \"INDEX (repo_id), INDEX (user_name))\"\n        \"ENGINE=INNODB\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    sql = \"CREATE TABLE IF NOT EXISTS InnerPubRepo (\"\n        \"id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, \"\n        \"repo_id CHAR(37),\"\n        \"permission CHAR(15), UNIQUE INDEX (repo_id))\"\n        \"ENGINE=INNODB\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    sql = \"CREATE TABLE IF NOT EXISTS RepoUserToken (\"\n        \"id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, \"\n        \"repo_id CHAR(37), \"\n        \"email VARCHAR(255), \"\n        \"token CHAR(41), \"\n        \"UNIQUE INDEX(repo_id, token), INDEX(token), INDEX (email))\"\n        \"ENGINE=INNODB\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    sql = \"CREATE TABLE IF NOT EXISTS RepoTokenPeerInfo (\"\n        \"id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, \"\n        \"token CHAR(41), \"\n        \"peer_id CHAR(41), \"\n        \"peer_ip VARCHAR(50), \"\n        \"peer_name VARCHAR(255), \"\n        \"sync_time BIGINT, \"\n        \"client_ver VARCHAR(20), UNIQUE INDEX(token), INDEX(peer_id))\"\n        \"ENGINE=INNODB\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    sql = \"CREATE 
TABLE IF NOT EXISTS RepoHead (\"\n        \"id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, \"\n        \"repo_id CHAR(37), branch_name VARCHAR(10), UNIQUE INDEX(repo_id))\"\n        \"ENGINE=INNODB\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    sql = \"CREATE TABLE IF NOT EXISTS RepoSize (\"\n        \"id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, \"\n        \"repo_id CHAR(37),\"\n        \"size BIGINT UNSIGNED,\"\n        \"head_id CHAR(41), UNIQUE INDEX (repo_id))\"\n        \"ENGINE=INNODB\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    sql = \"CREATE TABLE IF NOT EXISTS RepoHistoryLimit (\"\n        \"id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, \"\n        \"repo_id CHAR(37), days INTEGER, UNIQUE INDEX(repo_id))\"\n        \"ENGINE=INNODB\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    sql = \"CREATE TABLE IF NOT EXISTS RepoValidSince (\"\n        \"id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, \"\n        \"repo_id CHAR(37), timestamp BIGINT, UNIQUE INDEX(repo_id))\"\n        \"ENGINE=INNODB\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    sql = \"CREATE TABLE IF NOT EXISTS WebAP (id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, \"\n        \"repo_id CHAR(37), \"\n        \"access_property CHAR(10), UNIQUE INDEX(repo_id))\"\n        \"ENGINE=INNODB\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    sql = \"CREATE TABLE IF NOT EXISTS VirtualRepo (id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, \"\n        \"repo_id CHAR(36),\"\n        \"origin_repo CHAR(36), path TEXT, base_commit CHAR(40), UNIQUE INDEX(repo_id), INDEX(origin_repo))\"\n        \"ENGINE=INNODB\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    sql = \"CREATE TABLE IF NOT EXISTS GarbageRepos (id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, \"\n          \"repo_id CHAR(36), UNIQUE INDEX(repo_id))\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    /* Tables for 
online GC */\n\n    sql = \"CREATE TABLE IF NOT EXISTS GCID (id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, \"\n          \"repo_id CHAR(36), gc_id CHAR(36), UNIQUE INDEX(repo_id)) ENGINE=INNODB\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    sql = \"CREATE TABLE IF NOT EXISTS LastGCID (id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, \"\n          \"repo_id CHAR(36), client_id VARCHAR(128), gc_id CHAR(36), UNIQUE INDEX(repo_id, client_id)) ENGINE=INNODB\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    sql = \"CREATE TABLE IF NOT EXISTS RepoTrash (id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, \"\n        \"repo_id CHAR(36),\"\n        \"repo_name VARCHAR(255), head_id CHAR(40), owner_id VARCHAR(255),\"\n        \"size BIGINT(20), org_id INTEGER, del_time BIGINT, \"\n        \"UNIQUE INDEX(repo_id), INDEX(owner_id), INDEX(org_id))ENGINE=INNODB\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    sql = \"CREATE TABLE IF NOT EXISTS RepoFileCount (\"\n        \"id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, \"\n        \"repo_id CHAR(36),\"\n        \"file_count BIGINT UNSIGNED, UNIQUE INDEX(repo_id))ENGINE=INNODB\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    sql = \"CREATE TABLE IF NOT EXISTS RepoInfo (id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, \"\n        \"repo_id CHAR(36), \"\n        \"name VARCHAR(255) NOT NULL, update_time BIGINT, version INTEGER, \"\n        \"is_encrypted INTEGER, last_modifier VARCHAR(255), status INTEGER DEFAULT 0, type VARCHAR(10), \"\n        \"UNIQUE INDEX(repo_id), INDEX(type)) ENGINE=INNODB\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    sql = \"CREATE TABLE IF NOT EXISTS WebUploadTempFiles ( \"\n        \"id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, repo_id CHAR(40) NOT NULL, \"\n        \"file_path TEXT NOT NULL, tmp_file_path TEXT NOT NULL, INDEX(repo_id)) ENGINE=INNODB\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n  
  return 0;\n}\n\nstatic int\ncreate_tables_sqlite (SeafRepoManager *mgr)\n{\n    SeafDB *db = mgr->seaf->db;\n    char *sql;\n\n    sql = \"CREATE TABLE IF NOT EXISTS Repo (repo_id CHAR(37) PRIMARY KEY)\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    /* Owner */\n\n    sql = \"CREATE TABLE IF NOT EXISTS RepoOwner (\"\n        \"repo_id CHAR(37) PRIMARY KEY, \"\n        \"owner_id TEXT)\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n    sql = \"CREATE INDEX IF NOT EXISTS OwnerIndex ON RepoOwner (owner_id)\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    /* Group repo */\n\n    sql = \"CREATE TABLE IF NOT EXISTS RepoGroup (repo_id CHAR(37), \"\n        \"group_id INTEGER, user_name TEXT, permission CHAR(15))\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    sql = \"CREATE UNIQUE INDEX IF NOT EXISTS groupid_repoid_indx on \"\n        \"RepoGroup (group_id, repo_id)\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    sql = \"CREATE INDEX IF NOT EXISTS repogroup_repoid_index on \"\n        \"RepoGroup (repo_id)\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    sql = \"CREATE INDEX IF NOT EXISTS repogroup_username_indx on \"\n        \"RepoGroup (user_name)\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    /* Public repo */\n\n    sql = \"CREATE TABLE IF NOT EXISTS InnerPubRepo (\"\n        \"repo_id CHAR(37) PRIMARY KEY,\"\n        \"permission CHAR(15))\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    sql = \"CREATE TABLE IF NOT EXISTS RepoUserToken (\"\n        \"repo_id CHAR(37), \"\n        \"email VARCHAR(255), \"\n        \"token CHAR(41))\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    sql = \"CREATE UNIQUE INDEX IF NOT EXISTS repo_token_indx on \"\n        \"RepoUserToken (repo_id, token)\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    sql = \"CREATE INDEX IF NOT EXISTS 
repo_token_email_indx on \"\n        \"RepoUserToken (email)\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    sql = \"CREATE TABLE IF NOT EXISTS RepoTokenPeerInfo (\"\n        \"token CHAR(41) PRIMARY KEY, \"\n        \"peer_id CHAR(41), \"\n        \"peer_ip VARCHAR(50), \"\n        \"peer_name VARCHAR(255), \"\n        \"sync_time BIGINT, \"\n        \"client_ver VARCHAR(20))\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    sql = \"CREATE TABLE IF NOT EXISTS RepoHead (\"\n        \"repo_id CHAR(37) PRIMARY KEY, branch_name VARCHAR(10))\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    sql = \"CREATE TABLE IF NOT EXISTS RepoSize (\"\n        \"repo_id CHAR(37) PRIMARY KEY,\"\n        \"size BIGINT UNSIGNED,\"\n        \"head_id CHAR(41))\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    sql = \"CREATE TABLE IF NOT EXISTS RepoHistoryLimit (\"\n        \"repo_id CHAR(37) PRIMARY KEY, days INTEGER)\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    sql = \"CREATE TABLE IF NOT EXISTS RepoValidSince (\"\n        \"repo_id CHAR(37) PRIMARY KEY, timestamp BIGINT)\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    sql = \"CREATE TABLE IF NOT EXISTS WebAP (repo_id CHAR(37) PRIMARY KEY, \"\n        \"access_property CHAR(10))\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    sql = \"CREATE TABLE IF NOT EXISTS VirtualRepo (repo_id CHAR(36) PRIMARY KEY,\"\n        \"origin_repo CHAR(36), path TEXT, base_commit CHAR(40))\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    sql = \"CREATE INDEX IF NOT EXISTS virtualrepo_origin_repo_idx \"\n        \"ON VirtualRepo (origin_repo)\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    sql = \"CREATE TABLE IF NOT EXISTS GarbageRepos (repo_id CHAR(36) PRIMARY KEY)\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    sql = \"CREATE TABLE IF NOT EXISTS RepoTrash 
(repo_id CHAR(36) PRIMARY KEY,\"\n        \"repo_name VARCHAR(255), head_id CHAR(40), owner_id VARCHAR(255), size BIGINT UNSIGNED,\"\n        \"org_id INTEGER, del_time BIGINT)\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    sql = \"CREATE INDEX IF NOT EXISTS repotrash_owner_id_idx ON RepoTrash(owner_id)\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    sql = \"CREATE INDEX IF NOT EXISTS repotrash_org_id_idx ON RepoTrash(org_id)\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    sql = \"CREATE TABLE IF NOT EXISTS RepoFileCount (\"\n        \"repo_id CHAR(36) PRIMARY KEY,\"\n        \"file_count BIGINT UNSIGNED)\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    sql = \"CREATE TABLE IF NOT EXISTS RepoInfo (repo_id CHAR(36) PRIMARY KEY, \"\n        \"name VARCHAR(255) NOT NULL, update_time INTEGER, version INTEGER, \"\n        \"is_encrypted INTEGER, last_modifier VARCHAR(255), status INTEGER DEFAULT 0)\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    sql = \"CREATE TABLE IF NOT EXISTS WebUploadTempFiles (repo_id CHAR(40) NOT NULL, \"\n        \"file_path TEXT NOT NULL, tmp_file_path TEXT NOT NULL)\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    sql = \"CREATE INDEX IF NOT EXISTS webuploadtempfiles_repo_id_idx ON WebUploadTempFiles(repo_id)\";\n    if (seaf_db_query (db, sql) < 0)\n        return -1;\n\n    return 0;\n}\n\n/* static int */\n/* create_tables_pgsql (SeafRepoManager *mgr) */\n/* { */\n/*     SeafDB *db = mgr->seaf->db; */\n/*     char *sql; */\n\n/*     sql = \"CREATE TABLE IF NOT EXISTS Repo (repo_id CHAR(36) PRIMARY KEY)\"; */\n/*     if (seaf_db_query (db, sql) < 0) */\n/*         return -1; */\n\n/*     sql = \"CREATE TABLE IF NOT EXISTS RepoOwner (\" */\n/*         \"repo_id CHAR(36) PRIMARY KEY, \" */\n/*         \"owner_id VARCHAR(255))\"; */\n/*     if (seaf_db_query (db, sql) < 0) */\n/*         return -1; */\n\n/*     if 
(!pgsql_index_exists (db, \"repoowner_owner_idx\")) { */\n/*         sql = \"CREATE INDEX repoowner_owner_idx ON RepoOwner (owner_id)\"; */\n/*         if (seaf_db_query (db, sql) < 0) */\n/*             return -1; */\n/*     } */\n\n/*     sql = \"CREATE TABLE IF NOT EXISTS RepoGroup (repo_id CHAR(36), \" */\n/*         \"group_id INTEGER, user_name VARCHAR(255), permission VARCHAR(15), \" */\n/*         \"UNIQUE (group_id, repo_id))\"; */\n/*     if (seaf_db_query (db, sql) < 0) */\n/*         return -1; */\n\n/*     if (!pgsql_index_exists (db, \"repogroup_repoid_idx\")) { */\n/*         sql = \"CREATE INDEX repogroup_repoid_idx ON RepoGroup (repo_id)\"; */\n/*         if (seaf_db_query (db, sql) < 0) */\n/*             return -1; */\n/*     } */\n\n/*     if (!pgsql_index_exists (db, \"repogroup_username_idx\")) { */\n/*         sql = \"CREATE INDEX repogroup_username_idx ON RepoGroup (user_name)\"; */\n/*         if (seaf_db_query (db, sql) < 0) */\n/*             return -1; */\n/*     } */\n\n/*     sql = \"CREATE TABLE IF NOT EXISTS InnerPubRepo (\" */\n/*         \"repo_id CHAR(36) PRIMARY KEY,\" */\n/*         \"permission VARCHAR(15))\"; */\n/*     if (seaf_db_query (db, sql) < 0) */\n/*         return -1; */\n\n/*     sql = \"CREATE TABLE IF NOT EXISTS RepoUserToken (\" */\n/*         \"repo_id CHAR(36), \" */\n/*         \"email VARCHAR(255), \" */\n/*         \"token CHAR(40), \" */\n/*         \"UNIQUE (repo_id, token))\"; */\n/*     if (seaf_db_query (db, sql) < 0) */\n/*         return -1; */\n\n/*     if (!pgsql_index_exists (db, \"repousertoken_email_idx\")) { */\n/*         sql = \"CREATE INDEX repousertoken_email_idx ON RepoUserToken (email)\"; */\n/*         if (seaf_db_query (db, sql) < 0) */\n/*             return -1; */\n/*     } */\n\n/*     sql = \"CREATE TABLE IF NOT EXISTS RepoTokenPeerInfo (\" */\n/*         \"token CHAR(40) PRIMARY KEY, \" */\n/*         \"peer_id CHAR(40), \" */\n/*         \"peer_ip VARCHAR(40), \" */\n/*         
\"peer_name VARCHAR(255), \" */\n/*         \"sync_time BIGINT, \" */\n/*         \"client_ver VARCHAR(20))\"; */\n/*     if (seaf_db_query (db, sql) < 0) */\n/*         return -1; */\n\n/*     sql = \"CREATE TABLE IF NOT EXISTS RepoHead (\" */\n/*         \"repo_id CHAR(36) PRIMARY KEY, branch_name VARCHAR(10))\"; */\n/*     if (seaf_db_query (db, sql) < 0) */\n/*         return -1; */\n\n/*     sql = \"CREATE TABLE IF NOT EXISTS RepoSize (\" */\n/*         \"repo_id CHAR(36) PRIMARY KEY,\" */\n/*         \"size BIGINT,\" */\n/*         \"head_id CHAR(40))\"; */\n/*     if (seaf_db_query (db, sql) < 0) */\n/*         return -1; */\n\n/*     sql = \"CREATE TABLE IF NOT EXISTS RepoHistoryLimit (\" */\n/*         \"repo_id CHAR(36) PRIMARY KEY, days INTEGER)\"; */\n/*     if (seaf_db_query (db, sql) < 0) */\n/*         return -1; */\n\n/*     sql = \"CREATE TABLE IF NOT EXISTS RepoValidSince (\" */\n/*         \"repo_id CHAR(36) PRIMARY KEY, timestamp BIGINT)\"; */\n/*     if (seaf_db_query (db, sql) < 0) */\n/*         return -1; */\n\n/*     sql = \"CREATE TABLE IF NOT EXISTS WebAP (repo_id CHAR(36) PRIMARY KEY, \" */\n/*         \"access_property VARCHAR(10))\"; */\n/*     if (seaf_db_query (db, sql) < 0) */\n/*         return -1; */\n\n/*     sql = \"CREATE TABLE IF NOT EXISTS VirtualRepo (repo_id CHAR(36) PRIMARY KEY,\" */\n/*         \"origin_repo CHAR(36), path TEXT, base_commit CHAR(40))\"; */\n/*     if (seaf_db_query (db, sql) < 0) */\n/*         return -1; */\n\n/*     if (!pgsql_index_exists (db, \"virtualrepo_origin_repo_idx\")) { */\n/*         sql = \"CREATE INDEX virtualrepo_origin_repo_idx ON VirtualRepo (origin_repo)\"; */\n/*         if (seaf_db_query (db, sql) < 0) */\n/*             return -1; */\n/*     } */\n\n/*     sql = \"CREATE TABLE IF NOT EXISTS GarbageRepos (repo_id CHAR(36) PRIMARY KEY)\"; */\n/*     if (seaf_db_query (db, sql) < 0) */\n/*         return -1; */\n\n/*     sql = \"CREATE TABLE IF NOT EXISTS RepoTrash (repo_id CHAR(36) 
PRIMARY KEY,\" */\n/*         \"repo_name VARCHAR(255), head_id CHAR(40), owner_id VARCHAR(255), size bigint,\" */\n/*         \"org_id INTEGER, del_time BIGINT)\"; */\n/*     if (seaf_db_query (db, sql) < 0) */\n/*         return -1; */\n\n/*     if (!pgsql_index_exists (db, \"repotrash_owner_id\")) { */\n/*         sql = \"CREATE INDEX repotrash_owner_id on RepoTrash(owner_id)\"; */\n/*         if (seaf_db_query (db, sql) < 0) */\n/*             return -1; */\n/*     } */\n/*     if (!pgsql_index_exists (db, \"repotrash_org_id\")) { */\n/*         sql = \"CREATE INDEX repotrash_org_id on RepoTrash(org_id)\"; */\n/*         if (seaf_db_query (db, sql) < 0) */\n/*             return -1; */\n/*     } */\n\n/*     sql = \"CREATE TABLE IF NOT EXISTS RepoFileCount (\" */\n/*         \"repo_id CHAR(36) PRIMARY KEY,\" */\n/*         \"file_count BIGINT)\"; */\n/*     if (seaf_db_query (db, sql) < 0) */\n/*         return -1; */\n\n/*     sql = \"CREATE TABLE IF NOT EXISTS WebUploadTempFiles (repo_id CHAR(40) NOT NULL, \" */\n/*         \"file_path TEXT NOT NULL, tmp_file_path TEXT NOT NULL)\"; */\n/*     if (seaf_db_query (db, sql) < 0) */\n/*         return -1; */\n\n/*     sql = \"CREATE TABLE IF NOT EXISTS RepoInfo (repo_id CHAR(36) PRIMARY KEY, \" */\n/*         \"name VARCHAR(255) NOT NULL, update_time BIGINT, version INTEGER, \" */\n/*         \"is_encrypted INTEGER, last_modifier VARCHAR(255), status INTEGER DEFAULT 0)\"; */\n/*     if (seaf_db_query (db, sql) < 0) */\n/*         return -1; */\n\n/*     return 0; */\n/* } */\n\nstatic int\ncreate_db_tables_if_not_exist (SeafRepoManager *mgr)\n{\n    if (!mgr->seaf->create_tables && seaf_db_type (mgr->seaf->db) != SEAF_DB_TYPE_PGSQL)\n        return 0;\n\n    SeafDB *db = mgr->seaf->db;\n    int db_type = seaf_db_type (db);\n\n    if (db_type == SEAF_DB_TYPE_MYSQL)\n        return create_tables_mysql (mgr);\n    else if (db_type == SEAF_DB_TYPE_SQLITE)\n        return create_tables_sqlite (mgr);\n    /* else if 
(db_type == SEAF_DB_TYPE_PGSQL) */\n    /*     return create_tables_pgsql (mgr); */\n\n    g_return_val_if_reached (-1);\n}\n\n/*\n * Repo properties functions.\n */\n\nstatic inline char *\ngenerate_repo_token ()\n{\n    char *uuid = gen_uuid ();\n    unsigned char sha1[20];\n    char token[41];\n    SHA_CTX s;\n\n    SHA1_Init (&s);\n    SHA1_Update (&s, uuid, strlen(uuid));\n    SHA1_Final (sha1, &s);\n\n    rawdata_to_hex (sha1, token, 20);\n\n    g_free (uuid);\n\n    return g_strdup (token);\n}\n\nstatic int\nadd_repo_token (SeafRepoManager *mgr,\n                const char *repo_id,\n                const char *email,\n                const char *token,\n                GError **error)\n{\n    int rc = seaf_db_statement_query (mgr->seaf->db,\n                                      \"INSERT INTO RepoUserToken (repo_id, email, token) VALUES (?, ?, ?)\",\n                                      3, \"string\", repo_id, \"string\", email,\n                                      \"string\", token);\n\n    if (rc < 0) {\n        seaf_warning (\"failed to add repo token. 
repo = %s, email = %s\\n\",\n                      repo_id, email);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, \"DB error\");\n        return -1;\n    }\n\n    return 0;\n}\n\nchar *\nseaf_repo_manager_generate_repo_token (SeafRepoManager *mgr,\n                                       const char *repo_id,\n                                       const char *email,\n                                       GError **error)\n{\n    char *token = generate_repo_token ();\n    if (add_repo_token (mgr, repo_id, email, token, error) < 0) {\n        g_free (token);        \n        return NULL;\n    }\n\n    return token;\n}\n\nint\nseaf_repo_manager_add_token_peer_info (SeafRepoManager *mgr,\n                                       const char *token,\n                                       const char *peer_id,\n                                       const char *peer_ip,\n                                       const char *peer_name,\n                                       gint64 sync_time,\n                                       const char *client_ver)\n{\n    int ret = 0;\n\n    if (seaf_db_statement_query (mgr->seaf->db,\n                                 \"INSERT INTO RepoTokenPeerInfo (token, peer_id, peer_ip, peer_name, sync_time, client_ver)\"\n                                 \"VALUES (?, ?, ?, ?, ?, ?)\",\n                                 6, \"string\", token,\n                                 \"string\", peer_id,\n                                 \"string\", peer_ip,\n                                 \"string\", peer_name,\n                                 \"int64\", sync_time,\n                                 \"string\", client_ver) < 0)\n        ret = -1;\n\n    return ret;\n}\n\nint\nseaf_repo_manager_update_token_peer_info (SeafRepoManager *mgr,\n                                          const char *token,\n                                          const char *peer_ip,\n                                          gint64 sync_time,\n                 
                         const char *client_ver)\n{\n    int ret = 0;\n\n    if (seaf_db_statement_query (mgr->seaf->db,\n                                 \"UPDATE RepoTokenPeerInfo SET \"\n                                 \"peer_ip=?, sync_time=?, client_ver=? WHERE token=?\",\n                                 4, \"string\", peer_ip,\n                                 \"int64\", sync_time,\n                                 \"string\", client_ver,\n                                 \"string\", token) < 0)\n        ret = -1;\n\n    return ret;\n}\n\ngboolean\nseaf_repo_manager_token_peer_info_exists (SeafRepoManager *mgr,\n                                          const char *token)\n{\n    gboolean db_error = FALSE;\n\n    return seaf_db_statement_exists (mgr->seaf->db,\n                                     \"SELECT token FROM RepoTokenPeerInfo WHERE token=?\",\n                                     &db_error, 1, \"string\", token);\n}\n\nint\nseaf_repo_manager_delete_token (SeafRepoManager *mgr,\n                                const char *repo_id,\n                                const char *token,\n                                const char *user,\n                                GError **error)\n{\n    char *token_owner;\n\n    token_owner = seaf_repo_manager_get_email_by_token (mgr, repo_id, token);\n    if (!token_owner || strcmp (user, token_owner) != 0) {\n        seaf_warning (\"Requesting user is %s, token owner is %s, \"\n                      \"refuse to delete token %.10s.\\n\", user, token_owner, token);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, \"Permission denied\");\n        return -1;\n    }\n\n    if (seaf_db_statement_query (mgr->seaf->db,\n                                 \"DELETE t.*, i.* FROM RepoUserToken t, \"\n                                 \"RepoTokenPeerInfo i WHERE t.token=i.token AND \"\n                                 \"t.token=?\",\n                                 1, \"string\", token) < 0) {\n        
g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, \"DB error\");\n        return -1;\n    }\n\n    GList *tokens = NULL;\n    tokens = g_list_append (tokens, g_strdup(token));\n#ifdef HAVE_EVHTP\n    seaf_http_server_invalidate_tokens (seaf->http_server, tokens);\n#endif\n    g_list_free_full (tokens, (GDestroyNotify)g_free);\n\n    return 0;\n}\n\nstatic gboolean\ncollect_repo_token (SeafDBRow *row, void *data)\n{\n    GList **ret_list = data;\n    const char *repo_id, *repo_owner, *email, *token;\n    const char *peer_id, *peer_ip, *peer_name;\n    gint64 sync_time;\n    const char *client_ver;\n\n    repo_id = seaf_db_row_get_column_text (row, 0);\n    repo_owner = seaf_db_row_get_column_text (row, 1);\n    email = seaf_db_row_get_column_text (row, 2);\n    token = seaf_db_row_get_column_text (row, 3);\n\n    peer_id = seaf_db_row_get_column_text (row, 4);\n    peer_ip = seaf_db_row_get_column_text (row, 5);\n    peer_name = seaf_db_row_get_column_text (row, 6);\n    sync_time = seaf_db_row_get_column_int64 (row, 7);\n    client_ver = seaf_db_row_get_column_text (row, 8);\n\n    char *owner_l = g_ascii_strdown (repo_owner, -1);\n    char *email_l = g_ascii_strdown (email, -1);\n\n    SeafileRepoTokenInfo *repo_token_info;\n    repo_token_info = g_object_new (SEAFILE_TYPE_REPO_TOKEN_INFO,\n                                    \"repo_id\", repo_id,\n                                    \"repo_owner\", owner_l,\n                                    \"email\", email_l,\n                                    \"token\", token,\n                                    \"peer_id\", peer_id,\n                                    \"peer_ip\", peer_ip,\n                                    \"peer_name\", peer_name,\n                                    \"sync_time\", sync_time,\n                                    \"client_ver\", client_ver,\n                                    NULL);\n\n    *ret_list = g_list_prepend (*ret_list, repo_token_info);\n\n    g_free (owner_l);\n    
g_free (email_l);\n\n    return TRUE;\n}\n\nstatic void\nfill_in_token_info (GList *info_list)\n{\n    GList *ptr;\n    SeafileRepoTokenInfo *info;\n    SeafRepo *repo;\n    char *repo_name;\n\n    for (ptr = info_list; ptr; ptr = ptr->next) {\n        info = ptr->data;\n        repo = seaf_repo_manager_get_repo (seaf->repo_mgr,\n                                           seafile_repo_token_info_get_repo_id(info));\n        if (repo)\n            repo_name = g_strdup(repo->name);\n        else\n            repo_name = g_strdup(\"Unknown\");\n        seaf_repo_unref (repo);\n\n        g_object_set (info, \"repo_name\", repo_name, NULL);\n        g_free (repo_name);\n    }\n}\n\nGList *\nseaf_repo_manager_list_repo_tokens (SeafRepoManager *mgr,\n                                    const char *repo_id,\n                                    GError **error)\n{\n    GList *ret_list = NULL;\n    char *sql;\n    gboolean db_err = FALSE;\n\n    if (!repo_exists_in_db (mgr->seaf->db, repo_id, &db_err)) {\n        if (db_err) {\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, \"DB error\");\n        }\n        return NULL;\n    }\n\n    sql = \"SELECT u.repo_id, o.owner_id, u.email, u.token, \"\n        \"p.peer_id, p.peer_ip, p.peer_name, p.sync_time, p.client_ver \"\n        \"FROM RepoUserToken u LEFT JOIN RepoTokenPeerInfo p \"\n        \"ON u.token = p.token, RepoOwner o \"\n        \"WHERE u.repo_id = ? and o.repo_id = ? 
\";\n\n    int n_row = seaf_db_statement_foreach_row (mgr->seaf->db, sql,\n                                              collect_repo_token, &ret_list,\n                                              2, \"string\", repo_id,\n                                              \"string\", repo_id);\n    if (n_row < 0) {\n        seaf_warning (\"DB error when get token info for repo %.10s.\\n\",\n                      repo_id);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, \"DB error\");\n    }\n\n    fill_in_token_info (ret_list);\n\n    return g_list_reverse(ret_list);\n}\n\nGList *\nseaf_repo_manager_list_repo_tokens_by_email (SeafRepoManager *mgr,\n                                             const char *email,\n                                             GError **error)\n{\n    GList *ret_list = NULL;\n    char *sql;\n\n    sql = \"SELECT u.repo_id, o.owner_id, u.email, u.token, \"\n        \"p.peer_id, p.peer_ip, p.peer_name, p.sync_time, p.client_ver \"\n        \"FROM RepoUserToken u LEFT JOIN RepoTokenPeerInfo p \"\n        \"ON u.token = p.token, RepoOwner o \"\n        \"WHERE u.email = ? 
and u.repo_id = o.repo_id\";\n\n    int n_row = seaf_db_statement_foreach_row (mgr->seaf->db, sql,\n                                              collect_repo_token, &ret_list,\n                                              1, \"string\", email);\n    if (n_row < 0) {\n        seaf_warning (\"DB error when get token info for email %s.\\n\",\n                      email);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, \"DB error\");\n    }\n\n    fill_in_token_info (ret_list);\n\n    return g_list_reverse(ret_list);\n}\n\nstatic gboolean\ncollect_token_list (SeafDBRow *row, void *data)\n{\n    GList **p_tokens = data;\n    const char *token;\n\n    token = seaf_db_row_get_column_text (row, 0);\n    *p_tokens = g_list_prepend (*p_tokens, g_strdup(token));\n\n    return TRUE;\n}\n\n/**\n * Delete all repo tokens for a given user on a given client\n */\n\nint\nseaf_repo_manager_delete_repo_tokens_by_peer_id (SeafRepoManager *mgr,\n                                                 const char *email,\n                                                 const char *peer_id,\n                                                 GList **tokens,\n                                                 GError **error)\n{\n    int ret = 0;\n    const char *template;\n    GList *token_list = NULL;\n    int rc = 0;\n    int db_type = seaf_db_type (mgr->seaf->db);\n\n    template = \"SELECT u.token \"\n        \"FROM RepoUserToken u, RepoTokenPeerInfo p \"\n        \"WHERE u.token = p.token \"\n        \"AND u.email = ? 
AND p.peer_id = ?\";\n    rc = seaf_db_statement_foreach_row (mgr->seaf->db, template,\n                                        collect_token_list, &token_list,\n                                        2, \"string\", email, \"string\", peer_id);\n    if (rc < 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_INTERNAL, \"DB error\");\n        goto out;\n    }\n\n    if (rc == 0)\n        goto out;\n\n    if (db_type == SEAF_DB_TYPE_MYSQL) {\n        rc = seaf_db_statement_query (mgr->seaf->db, \"DELETE u.*, p.* \"\n                                      \"FROM RepoUserToken u, RepoTokenPeerInfo p \"\n                                      \"WHERE u.token=p.token AND \"\n                                      \"u.email = ? AND p.peer_id = ?\",\n                                      2, \"string\", email, \"string\", peer_id);\n        if (rc < 0) {\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_INTERNAL, \"DB error\");\n            goto out;\n        }\n    } else if (db_type == SEAF_DB_TYPE_SQLITE) {\n        GString *sql = g_string_new (\"\");\n        GList *iter;\n        int i = 0;\n        char *token;\n\n        g_string_append_printf (sql, \"DELETE FROM RepoUserToken WHERE email = '%s' AND token IN (\", email);\n        for (iter = token_list; iter; iter = iter->next) {\n            token = iter->data;\n            if (i == 0)\n                g_string_append_printf (sql, \"'%s'\", token);\n            else\n                g_string_append_printf (sql, \", '%s'\", token);\n            ++i;\n        }\n        g_string_append (sql, \")\");\n\n        rc = seaf_db_statement_query (mgr->seaf->db, sql->str, 0);\n        if (rc < 0) {\n            g_string_free (sql, TRUE);\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_INTERNAL, \"DB error\");\n            goto out;\n        }\n        g_string_free (sql, TRUE);\n\n        sql = g_string_new (\"\");\n        g_string_append_printf (sql, \"DELETE FROM RepoTokenPeerInfo WHERE peer_id = 
'%s' AND token IN (\", peer_id);\n        i = 0;\n        for (iter = token_list; iter; iter = iter->next) {\n            token = iter->data;\n            if (i == 0)\n                g_string_append_printf (sql, \"'%s'\", token);\n            else\n                g_string_append_printf (sql, \", '%s'\", token);\n            ++i;\n        }\n        g_string_append (sql, \")\");\n\n        rc = seaf_db_statement_query (mgr->seaf->db, sql->str, 0);\n        if (rc < 0) {\n            g_string_free (sql, TRUE);\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_INTERNAL, \"DB error\");\n            goto out;\n        }\n        g_string_free (sql, TRUE);\n    }\n\nout:\n    if (rc < 0) {\n        ret = -1;\n        g_list_free_full (token_list, (GDestroyNotify)g_free);\n    } else {\n        *tokens = token_list;\n    }\n\n    return ret;\n}\n\nint\nseaf_repo_manager_delete_repo_tokens_by_email (SeafRepoManager *mgr,\n                                               const char *email,\n                                               GError **error)\n{\n    int ret = 0;\n    const char *template;\n    GList *token_list = NULL;\n    int rc;\n\n    template = \"SELECT u.token \"\n        \"FROM RepoUserToken u, RepoTokenPeerInfo p \"\n        \"WHERE u.token = p.token \"\n        \"AND u.email = ?\";\n    rc = seaf_db_statement_foreach_row (mgr->seaf->db, template,\n                                        collect_token_list, &token_list,\n                                        1, \"string\", email);\n    if (rc < 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_INTERNAL, \"DB error\");\n        goto out;\n    }\n\n    if (rc == 0)\n        goto out;\n\n    rc = seaf_db_statement_query (mgr->seaf->db, \"DELETE u.*, p.* \"\n                                  \"FROM RepoUserToken u, RepoTokenPeerInfo p \"\n                                  \"WHERE u.token=p.token AND \"\n                                  \"u.email = ?\",\n                                  1, 
\"string\", email);\n    if (rc < 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_INTERNAL, \"DB error\");\n        goto out;\n    }\n\n#ifdef HAVE_EVHTP\n    seaf_http_server_invalidate_tokens (seaf->http_server, token_list);\n#endif\n\nout:\n    g_list_free_full (token_list, (GDestroyNotify)g_free);\n\n    if (rc < 0) {\n        ret = -1;\n    }\n\n    return ret;\n}\n\nstatic gboolean\nget_email_by_token_cb (SeafDBRow *row, void *data)\n{\n    char **email_ptr = data;\n\n    const char *email = (const char *) seaf_db_row_get_column_text (row, 0);\n    *email_ptr = g_ascii_strdown (email, -1);\n    /* There should be only one result. */\n    return FALSE;\n}\n\nchar *\nseaf_repo_manager_get_email_by_token (SeafRepoManager *manager,\n                                      const char *repo_id,\n                                      const char *token)\n{\n    if (!repo_id || !token)\n        return NULL;\n    \n    char *email = NULL;\n    char *sql;\n\n    sql = \"SELECT email FROM RepoUserToken \"\n        \"WHERE repo_id = ? 
AND token = ?\";\n\n    seaf_db_statement_foreach_row (seaf->db, sql,\n                                   get_email_by_token_cb, &email,\n                                   2, \"string\", repo_id, \"string\", token);\n\n    return email;\n}\n\nstatic gboolean\nget_repo_size (SeafDBRow *row, void *vsize)\n{\n    gint64 *psize = vsize;\n\n    *psize = seaf_db_row_get_column_int64 (row, 0);\n\n    return FALSE;\n}\n\ngint64\nseaf_repo_manager_get_repo_size (SeafRepoManager *mgr, const char *repo_id)\n{\n    gint64 size = 0;\n    char *sql;\n\n    sql = \"SELECT size FROM RepoSize WHERE repo_id=?\";\n\n    if (seaf_db_statement_foreach_row (mgr->seaf->db, sql,\n                                       get_repo_size, &size,\n                                       1, \"string\", repo_id) < 0)\n        return -1;\n\n    return size;\n}\n\nint\nseaf_repo_manager_set_repo_history_limit (SeafRepoManager *mgr,\n                                          const char *repo_id,\n                                          int days)\n{\n    SeafVirtRepo *vinfo;\n    SeafDB *db = mgr->seaf->db;\n\n    vinfo = seaf_repo_manager_get_virtual_repo_info (mgr, repo_id);\n    if (vinfo) {\n        seaf_virtual_repo_info_free (vinfo);\n        return 0;\n    }\n\n    if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL) {\n        gboolean exists, err;\n        int rc;\n\n        exists = seaf_db_statement_exists (db,\n                                           \"SELECT repo_id FROM RepoHistoryLimit \"\n                                           \"WHERE repo_id=?\",\n                                           &err, 1, \"string\", repo_id);\n        if (err)\n            return -1;\n\n        if (exists)\n            rc = seaf_db_statement_query (db,\n                                          \"UPDATE RepoHistoryLimit SET days=? 
\"\n                                          \"WHERE repo_id=?\",\n                                          2, \"int\", days, \"string\", repo_id);\n        else\n            rc = seaf_db_statement_query (db,\n                                          \"INSERT INTO RepoHistoryLimit (repo_id, days) VALUES \"\n                                          \"(?, ?)\",\n                                          2, \"string\", repo_id, \"int\", days);\n        return rc;\n    } else {\n        if (seaf_db_statement_query (db,\n                                     \"REPLACE INTO RepoHistoryLimit (repo_id, days) VALUES (?, ?)\",\n                                     2, \"string\", repo_id, \"int\", days) < 0)\n            return -1;\n    }\n\n    return 0;\n}\n\nstatic gboolean\nget_history_limit_cb (SeafDBRow *row, void *data)\n{\n    int *limit = data;\n\n    *limit = seaf_db_row_get_column_int (row, 0);\n\n    return FALSE;\n}\n\nint\nseaf_repo_manager_get_repo_history_limit (SeafRepoManager *mgr,\n                                          const char *repo_id)\n{\n    SeafVirtRepo *vinfo;\n    const char *r_repo_id = repo_id;\n    char *sql;\n    int per_repo_days = -1;\n    int ret;\n\n    vinfo = seaf_repo_manager_get_virtual_repo_info (mgr, repo_id);\n    if (vinfo)\n        r_repo_id = vinfo->origin_repo_id;\n\n    sql = \"SELECT days FROM RepoHistoryLimit WHERE repo_id=?\";\n\n    ret = seaf_db_statement_foreach_row (mgr->seaf->db, sql, get_history_limit_cb,\n                                         &per_repo_days, 1, \"string\", r_repo_id);\n    if (ret == 0) {\n        // limit not set, return global one\n        per_repo_days= seaf_cfg_manager_get_config_int (mgr->seaf->cfg_mgr,\n                                                        \"history\", \"keep_days\");\n    }\n\n    // db error or limit set as negative, means keep full history, return -1\n    if (per_repo_days < 0)\n        per_repo_days = -1;\n\n    seaf_virtual_repo_info_free (vinfo);\n\n    return 
per_repo_days;\n}\n\nint\nseaf_repo_manager_set_repo_valid_since (SeafRepoManager *mgr,\n                                        const char *repo_id,\n                                        gint64 timestamp)\n{\n    SeafDB *db = mgr->seaf->db;\n\n    if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL) {\n        gboolean exists, err;\n        int rc;\n\n        exists = seaf_db_statement_exists (db,\n                                           \"SELECT repo_id FROM RepoValidSince WHERE \"\n                                           \"repo_id=?\", &err, 1, \"string\", repo_id);\n        if (err)\n            return -1;\n\n        if (exists)\n            rc = seaf_db_statement_query (db,\n                                          \"UPDATE RepoValidSince SET timestamp=?\"\n                                          \" WHERE repo_id=?\",\n                                          2, \"int64\", timestamp, \"string\", repo_id);\n        else\n            rc = seaf_db_statement_query (db,\n                                          \"INSERT INTO RepoValidSince (repo_id, timestamp) VALUES \"\n                                          \"(?, ?)\", 2, \"string\", repo_id,\n                                          \"int64\", timestamp);\n        if (rc < 0)\n            return -1;\n    } else {\n        if (seaf_db_statement_query (db,\n                           \"REPLACE INTO RepoValidSince (repo_id, timestamp) VALUES (?, ?)\",\n                           2, \"string\", repo_id, \"int64\", timestamp) < 0)\n            return -1;\n    }\n\n    return 0;\n}\n\ngint64\nseaf_repo_manager_get_repo_valid_since (SeafRepoManager *mgr,\n                                        const char *repo_id)\n{\n    char *sql;\n\n    sql = \"SELECT timestamp FROM RepoValidSince WHERE repo_id=?\";\n    /* Also return -1 if doesn't exist. 
*/\n    return seaf_db_statement_get_int64 (mgr->seaf->db, sql, 1, \"string\", repo_id);\n}\n\ngint64\nseaf_repo_manager_get_repo_truncate_time (SeafRepoManager *mgr,\n                                          const char *repo_id)\n{\n    int days;\n    gint64 timestamp;\n\n    days = seaf_repo_manager_get_repo_history_limit (mgr, repo_id);\n    timestamp = seaf_repo_manager_get_repo_valid_since (mgr, repo_id);\n\n    gint64 now = (gint64)time(NULL);\n    if (days > 0)\n        return MAX (now - days * 24 * 3600, timestamp);\n    else if (days < 0)\n        return timestamp;\n    else\n        return 0;\n}\n\n/*\n * Permission related functions.\n */\n\n/* Owner functions. */\n\nint\nseaf_repo_manager_set_repo_owner (SeafRepoManager *mgr,\n                                  const char *repo_id,\n                                  const char *email)\n{\n    SeafDB *db = mgr->seaf->db;\n    char sql[256];\n    char *orig_owner = NULL;\n    int ret = 0;\n\n    orig_owner = seaf_repo_manager_get_repo_owner (mgr, repo_id);\n    if (g_strcmp0 (orig_owner, email) == 0)\n        goto out;\n\n    if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL) {\n        gboolean err;\n        snprintf(sql, sizeof(sql),\n                 \"SELECT repo_id FROM RepoOwner WHERE repo_id=?\");\n        if (seaf_db_statement_exists (db, sql, &err,\n                                      1, \"string\", repo_id))\n            snprintf(sql, sizeof(sql),\n                     \"UPDATE RepoOwner SET owner_id='%s' WHERE \"\n                     \"repo_id='%s'\", email, repo_id);\n        else\n            snprintf(sql, sizeof(sql),\n                     \"INSERT INTO RepoOwner (repo_id, owner_id) VALUES ('%s', '%s')\",\n                     repo_id, email);\n        if (err) {\n            ret = -1;\n            goto out;\n        }\n\n        if (seaf_db_query (db, sql) < 0) {\n            ret = -1;\n            goto out;\n        }\n    } else {\n        if (seaf_db_statement_query (db, \"REPLACE INTO 
RepoOwner (repo_id, owner_id) VALUES (?, ?)\",\n                                     2, \"string\", repo_id, \"string\", email) < 0) {\n            ret = -1;\n            goto out;\n        }\n    }\n\n    /* If the repo was newly created, no need to remove share and virtual repos. */\n    if (!orig_owner)\n        goto out;\n\n    seaf_db_statement_query (mgr->seaf->db, \"DELETE FROM SharedRepo WHERE repo_id = ?\",\n                             1, \"string\", repo_id);\n\n    seaf_db_statement_query (mgr->seaf->db, \"DELETE FROM RepoGroup WHERE repo_id = ?\",\n                             1, \"string\", repo_id);\n\n    if (!seaf->cloud_mode) {\n        seaf_db_statement_query (mgr->seaf->db, \"DELETE FROM InnerPubRepo WHERE repo_id = ?\",\n                                 1, \"string\", repo_id);\n    }\n\n    /* Remove virtual repos when repo ownership changes. */\n    GList *vrepos, *ptr;\n    vrepos = seaf_repo_manager_get_virtual_repo_ids_by_origin (mgr, repo_id);\n    for (ptr = vrepos; ptr != NULL; ptr = ptr->next)\n        remove_virtual_repo_ondisk (mgr, (char *)ptr->data);\n    string_list_free (vrepos);\n\n    seaf_db_statement_query (mgr->seaf->db, \"DELETE FROM VirtualRepo \"\n                             \"WHERE repo_id=? OR origin_repo=?\",\n                             2, \"string\", repo_id, \"string\", repo_id);\n\nout:\n    g_free (orig_owner);\n    return ret;\n}\n\nstatic gboolean\nget_owner (SeafDBRow *row, void *data)\n{\n    char **owner_id = data;\n\n    const char *owner = (const char *) seaf_db_row_get_column_text (row, 0);\n    *owner_id = g_ascii_strdown (owner, -1);\n    /* There should be only one result. 
*/\n    return FALSE;\n}\n\nchar *\nseaf_repo_manager_get_repo_owner (SeafRepoManager *mgr,\n                                  const char *repo_id)\n{\n    char *sql;\n    char *ret = NULL;\n\n    sql = \"SELECT owner_id FROM RepoOwner WHERE repo_id=?\";\n    if (seaf_db_statement_foreach_row (mgr->seaf->db, sql,\n                                       get_owner, &ret,\n                                       1, \"string\", repo_id) < 0) {\n        seaf_warning (\"Failed to get owner id for repo %s.\\n\", repo_id);\n        return NULL;\n    }\n\n    return ret;\n}\n\nstatic gboolean\ncollect_repo_id (SeafDBRow *row, void *data)\n{\n    GList **p_ids = data;\n    const char *repo_id;\n\n    repo_id = seaf_db_row_get_column_text (row, 0);\n    *p_ids = g_list_prepend (*p_ids, g_strdup(repo_id));\n\n    return TRUE;\n}\n\nGList *\nseaf_repo_manager_get_orphan_repo_list (SeafRepoManager *mgr)\n{\n    GList *id_list = NULL, *ptr;\n    GList *ret = NULL;\n    char sql[256];\n\n    snprintf (sql, sizeof(sql), \"SELECT Repo.repo_id FROM Repo LEFT JOIN \"\n              \"RepoOwner ON Repo.repo_id = RepoOwner.repo_id WHERE \"\n              \"RepoOwner.owner_id is NULL\");\n\n    if (seaf_db_foreach_selected_row (mgr->seaf->db, sql,\n                                      collect_repo_id, &id_list) < 0)\n        return NULL;\n\n    for (ptr = id_list; ptr; ptr = ptr->next) {\n        char *repo_id = ptr->data;\n        SeafRepo *repo = seaf_repo_manager_get_repo (mgr, repo_id);\n        if (repo != NULL)\n            ret = g_list_prepend (ret, repo);\n    }\n\n    string_list_free (id_list);\n\n    return ret;\n}\n\ngboolean\ncollect_repos_fill_size_commit (SeafDBRow *row, void *data)\n{\n    GList **prepos = data;\n    SeafRepo *repo;\n    SeafBranch *head;\n\n    const char *repo_id = seaf_db_row_get_column_text (row, 0);\n    gint64 size = seaf_db_row_get_column_int64 (row, 1);\n    const char *commit_id = seaf_db_row_get_column_text (row, 2);\n    const char *repo_name = 
seaf_db_row_get_column_text (row, 3);\n    gint64 update_time = seaf_db_row_get_column_int64 (row, 4);\n    int version = seaf_db_row_get_column_int (row, 5);\n    gboolean is_encrypted = seaf_db_row_get_column_int (row, 6) ? TRUE : FALSE;\n    const char *last_modifier = seaf_db_row_get_column_text (row, 7);\n    int status = seaf_db_row_get_column_int (row, 8);\n    const char *type = seaf_db_row_get_column_text (row, 9);\n\n    repo = seaf_repo_new (repo_id, NULL, NULL);\n    if (!repo)\n        return TRUE;\n\n    if (!commit_id) {\n        repo->is_corrupted = TRUE;\n        goto out;\n    }\n\n    repo->size = size;\n    if (seaf_db_row_get_column_count (row) == 11) {\n        gint64 file_count = seaf_db_row_get_column_int64 (row, 10);\n        repo->file_count = file_count;\n    }\n    head = seaf_branch_new (\"master\", repo_id, commit_id);\n    repo->head = head;\n    if (repo_name) {\n        repo->name = g_strdup (repo_name);\n        repo->last_modify = update_time;\n        repo->version = version;\n        repo->encrypted = is_encrypted;\n        repo->last_modifier = g_strdup (last_modifier);\n        repo->status = status;\n    }\n    if (type) {\n        repo->type = g_strdup(type);\n    }\n\nout:\n    *prepos = g_list_prepend (*prepos, repo);\n\n    return TRUE;\n}\n\nGList *\nseaf_repo_manager_get_repos_by_owner (SeafRepoManager *mgr,\n                                      const char *email,\n                                      int ret_corrupted,\n                                      int start,\n                                      int limit,\n                                      gboolean *db_err)\n{\n    GList *repo_list = NULL, *ptr;\n    GList *ret = NULL;\n    char *sql;\n    SeafRepo *repo = NULL;\n    int db_type = seaf_db_type(mgr->seaf->db);\n\n    if (start == -1 && limit == -1) {\n        if (db_type != SEAF_DB_TYPE_PGSQL)\n            sql = \"SELECT o.repo_id, s.size, b.commit_id, i.name, i.update_time, \"\n                
\"i.version, i.is_encrypted, i.last_modifier, i.status, i.type FROM \"\n                \"RepoOwner o LEFT JOIN RepoSize s ON o.repo_id = s.repo_id \"\n                \"LEFT JOIN Branch b ON o.repo_id = b.repo_id \"\n                \"LEFT JOIN RepoInfo i ON o.repo_id = i.repo_id \"\n                \"LEFT JOIN VirtualRepo v ON o.repo_id = v.repo_id \"\n                \"WHERE owner_id=? AND \"\n                \"v.repo_id IS NULL \"\n                \"ORDER BY i.update_time DESC, o.repo_id\";\n        else\n            sql = \"SELECT o.repo_id, s.\\\"size\\\", b.commit_id, i.name, i.update_time, \"\n                \"i.version, i.is_encrypted, i.last_modifier, i.status FROM \"\n                \"RepoOwner o LEFT JOIN RepoSize s ON o.repo_id = s.repo_id \"\n                \"LEFT JOIN Branch b ON o.repo_id = b.repo_id \"\n                \"LEFT JOIN RepoInfo i ON o.repo_id = i.repo_id \"\n                \"WHERE owner_id=? AND \"\n                \"o.repo_id NOT IN (SELECT v.repo_id FROM VirtualRepo v) \"\n                \"ORDER BY i.update_time DESC, o.repo_id\";\n\n        if (seaf_db_statement_foreach_row (mgr->seaf->db, sql, \n                                           collect_repos_fill_size_commit, &repo_list,\n                                           1, \"string\", email) < 0) {\n            if (db_err)\n                *db_err = TRUE;\n            return NULL;\n        }\n    } else {\n        if (db_type != SEAF_DB_TYPE_PGSQL)\n            sql = \"SELECT o.repo_id, s.size, b.commit_id, i.name, i.update_time, \"\n                \"i.version, i.is_encrypted, i.last_modifier, i.status, i.type FROM \"\n                \"RepoOwner o LEFT JOIN RepoSize s ON o.repo_id = s.repo_id \"\n                \"LEFT JOIN Branch b ON o.repo_id = b.repo_id \"\n                \"LEFT JOIN RepoInfo i ON o.repo_id = i.repo_id \"\n                \"LEFT JOIN VirtualRepo v ON o.repo_id = v.repo_id \"\n                \"WHERE owner_id=? 
AND \"\n                \"v.repo_id IS NULL \"\n                \"ORDER BY i.update_time DESC, o.repo_id \"\n                \"LIMIT ? OFFSET ?\";\n        else\n            sql = \"SELECT o.repo_id, s.\\\"size\\\", b.commit_id, i.name, i.update_time, \"\n                \"i.version, i.is_encrypted, i.last_modifier, i.status FROM \"\n                \"RepoOwner o LEFT JOIN RepoSize s ON o.repo_id = s.repo_id \"\n                \"LEFT JOIN Branch b ON o.repo_id = b.repo_id \"\n                \"LEFT JOIN RepoInfo i ON o.repo_id = i.repo_id \"\n                \"WHERE owner_id=? AND \"\n                \"o.repo_id NOT IN (SELECT v.repo_id FROM VirtualRepo v) \"\n                \"ORDER BY i.update_time DESC, o.repo_id \"\n                \"LIMIT ? OFFSET ?\";\n\n        if (seaf_db_statement_foreach_row (mgr->seaf->db, sql, \n                                           collect_repos_fill_size_commit,\n                                           &repo_list,\n                                           3, \"string\", email,\n                                           \"int\", limit,\n                                           \"int\", start) < 0) {\n            if (db_err)\n                *db_err = TRUE;\n            return NULL;\n        }\n    }\n\n    for (ptr = repo_list; ptr; ptr = ptr->next) {\n        repo = ptr->data;\n        if (ret_corrupted) {\n            if (!repo->is_corrupted && (!repo->name || !repo->last_modifier)) {\n                load_mini_repo (mgr, repo);\n                if (!repo->is_corrupted)\n                    set_repo_commit_to_db (repo->id, repo->name, repo->last_modify,\n                                           repo->version, (repo->encrypted ? 
1 : 0),\n                                           repo->last_modifier);\n            }\n        } else {\n            if (repo->is_corrupted) {\n                seaf_repo_unref (repo);\n                continue;\n            }\n            if (!repo->name || !repo->last_modifier) {\n                load_mini_repo (mgr, repo);\n                if (!repo->is_corrupted)\n                    set_repo_commit_to_db (repo->id, repo->name, repo->last_modify,\n                                           repo->version, (repo->encrypted ? 1 : 0),\n                                           repo->last_modifier);\n            }\n            if (repo->is_corrupted) {\n                seaf_repo_unref (repo);\n                continue;\n            }\n        }\n        if (repo != NULL)\n            ret = g_list_prepend (ret, repo);\n    }\n    g_list_free (repo_list);\n\n    return ret;\n}\n\nGList *\nseaf_repo_manager_get_repos_by_id_prefix (SeafRepoManager *mgr,\n                                          const char *id_prefix,\n                                          int start,\n                                          int limit)\n{\n    GList *repo_list = NULL, *ptr;\n    char *sql;\n    SeafRepo *repo = NULL;\n    int len = strlen(id_prefix);\n\n    if (len >= 37)\n        return NULL;\n\n    int db_type = seaf_db_type(mgr->seaf->db);\n    char *db_patt = g_strdup_printf (\"%s%%\", id_prefix);\n\n    if (start == -1 && limit == -1) {\n        if (db_type != SEAF_DB_TYPE_PGSQL)\n            sql = \"SELECT i.repo_id, s.size, b.commit_id, i.name, i.update_time, \"\n                \"i.version, i.is_encrypted, i.last_modifier, i.status, i.type FROM \"\n                \"RepoInfo i LEFT JOIN RepoSize s ON i.repo_id = s.repo_id \"\n                \"LEFT JOIN Branch b ON i.repo_id = b.repo_id \"\n                \"LEFT JOIN VirtualRepo v ON i.repo_id = v.repo_id \"\n                \"WHERE i.repo_id LIKE ? 
AND \"\n                \"v.repo_id IS NULL \"\n                \"ORDER BY i.update_time DESC, i.repo_id\";\n        else\n            sql = \"SELECT i.repo_id, s.\\\"size\\\", b.commit_id, i.name, i.update_time, \"\n                \"i.version, i.is_encrypted, i.last_modifier, i.status FROM \"\n                \"RepoInfo i LEFT JOIN RepoSize s ON i.repo_id = s.repo_id \"\n                \"LEFT JOIN Branch b ON i.repo_id = b.repo_id \"\n                \"WHERE i.repo_id LIKE ? AND \"\n                \"i.repo_id NOT IN (SELECT v.repo_id FROM VirtualRepo v) \"\n                \"ORDER BY i.update_time DESC, i.repo_id\";\n\n        if (seaf_db_statement_foreach_row (mgr->seaf->db, sql,\n                                           collect_repos_fill_size_commit, &repo_list,\n                                           1, \"string\", db_patt) < 0) {\n            g_free(db_patt);\n            return NULL;\n        }\n    } else {\n        if (db_type != SEAF_DB_TYPE_PGSQL)\n            sql = \"SELECT i.repo_id, s.size, b.commit_id, i.name, i.update_time, \"\n                \"i.version, i.is_encrypted, i.last_modifier, i.status, i.type FROM \"\n                \"RepoInfo i LEFT JOIN RepoSize s ON i.repo_id = s.repo_id \"\n                \"LEFT JOIN Branch b ON i.repo_id = b.repo_id \"\n                \"LEFT JOIN VirtualRepo v ON i.repo_id = v.repo_id \"\n                \"WHERE i.repo_id LIKE ? AND \"\n                \"v.repo_id IS NULL \"\n                \"ORDER BY i.update_time DESC, i.repo_id \"\n                \"LIMIT ? OFFSET ?\";\n        else\n            sql = \"SELECT i.repo_id, s.\\\"size\\\", b.commit_id, i.name, i.update_time, \"\n                \"i.version, i.is_encrypted, i.last_modifier, i.status FROM \"\n                \"RepoInfo i LEFT JOIN RepoSize s ON i.repo_id = s.repo_id \"\n                \"LEFT JOIN Branch b ON i.repo_id = b.repo_id \"\n                \"WHERE i.repo_id LIKE ? 
AND \"\n                \"i.repo_id NOT IN (SELECT v.repo_id FROM VirtualRepo v) \"\n                \"ORDER BY i.update_time DESC, i.repo_id \"\n                \"LIMIT ? OFFSET ?\";\n\n        if (seaf_db_statement_foreach_row (mgr->seaf->db, sql,\n                                           collect_repos_fill_size_commit,\n                                           &repo_list,\n                                           3, \"string\", db_patt,\n                                           \"int\", limit,\n                                           \"int\", start) < 0) {\n            g_free(db_patt);\n            return NULL;\n        }\n    }\n\n    g_free(db_patt);\n\n    return repo_list;\n}\n\nGList *\nseaf_repo_manager_search_repos_by_name (SeafRepoManager *mgr, const char *name)\n{\n    GList *repo_list = NULL;\n    char *sql = NULL;\n\n    char *db_patt = g_strdup_printf (\"%%%s%%\", name);\n\n    switch (seaf_db_type(seaf->db)) {\n    case SEAF_DB_TYPE_MYSQL:\n        sql = \"SELECT i.repo_id, s.size, b.commit_id, i.name, i.update_time, \"\n            \"i.version, i.is_encrypted, i.last_modifier, i.status, i.type, fc.file_count FROM \"\n            \"RepoInfo i LEFT JOIN RepoSize s ON i.repo_id = s.repo_id \"\n            \"LEFT JOIN Branch b ON i.repo_id = b.repo_id \"\n            \"LEFT JOIN RepoFileCount fc ON i.repo_id = fc.repo_id \"\n            \"LEFT JOIN Repo r ON i.repo_id = r.repo_id \"\n            \"LEFT JOIN VirtualRepo v ON i.repo_id = v.repo_id \"\n            \"WHERE i.name COLLATE UTF8_GENERAL_CI LIKE ? 
AND \"\n            \"r.repo_id IS NOT NULL AND \"\n            \"v.repo_id IS NULL \"\n            \"ORDER BY i.update_time DESC, i.repo_id\";\n        break;\n    case SEAF_DB_TYPE_PGSQL:\n        sql = \"SELECT i.repo_id, s.\\\"size\\\", b.commit_id, i.name, i.update_time, \"\n            \"i.version, i.is_encrypted, i.last_modifier, i.status, fc.file_count FROM \"\n            \"RepoInfo i LEFT JOIN RepoSize s ON i.repo_id = s.repo_id \"\n            \"LEFT JOIN Branch b ON i.repo_id = b.repo_id \"\n            \"LEFT JOIN RepoFileCount fc ON i.repo_id = fc.repo_id \"\n            \"WHERE i.name ILIKE ? AND \"\n            \"i.repo_id IN (SELECT r.repo_id FROM Repo r) AND \"\n            \"i.repo_id NOT IN (SELECT v.repo_id FROM VirtualRepo v) \"\n            \"ORDER BY i.update_time DESC, i.repo_id\";\n        break;\n    case SEAF_DB_TYPE_SQLITE:\n        sql = \"SELECT i.repo_id, s.size, b.commit_id, i.name, i.update_time, \"\n            \"i.version, i.is_encrypted, i.last_modifier, i.status, i.type, fc.file_count FROM \"\n            \"RepoInfo i LEFT JOIN RepoSize s ON i.repo_id = s.repo_id \"\n            \"LEFT JOIN Branch b ON i.repo_id = b.repo_id \"\n            \"LEFT JOIN RepoFileCount fc ON i.repo_id = fc.repo_id \"\n            \"LEFT JOIN Repo r ON i.repo_id = r.repo_id \"\n            \"LEFT JOIN VirtualRepo v ON i.repo_id = v.repo_id \"\n            \"WHERE i.name LIKE ? 
COLLATE NOCASE AND \"\n            \"r.repo_id IS NOT NULL AND \"\n            \"v.repo_id IS NULL \"\n            \"ORDER BY i.update_time DESC, i.repo_id\";\n        break;\n    default:\n        g_free (db_patt);\n        return NULL;\n    }\n\n    if (seaf_db_statement_foreach_row (mgr->seaf->db, sql,\n                                       collect_repos_fill_size_commit, &repo_list,\n                                       1, \"string\", db_patt) < 0) {\n        g_free (db_patt);\n        return NULL;\n    }\n\n    g_free (db_patt);\n    return repo_list;\n}\n\nGList *\nseaf_repo_manager_get_repo_id_list (SeafRepoManager *mgr)\n{\n    GList *ret = NULL;\n    char sql[256];\n\n    snprintf (sql, 256, \"SELECT repo_id FROM Repo\");\n\n    if (seaf_db_foreach_selected_row (mgr->seaf->db, sql, \n                                      collect_repo_id, &ret) < 0)\n        return NULL;\n\n    return ret;\n}\n\nGList *\nseaf_repo_manager_get_repo_list (SeafRepoManager *mgr, int start, int limit, const char *order_by, int ret_virt_repo)\n{\n    GList *ret = NULL;\n    int rc;\n    GString *sql = g_string_new (\"\");\n\n    if (start == -1 && limit == -1) {\n        switch (seaf_db_type(mgr->seaf->db)) {\n        case SEAF_DB_TYPE_MYSQL:\n            g_string_append (sql, \"SELECT i.repo_id, s.size, b.commit_id, i.name, i.update_time, \"\n                \"i.version, i.is_encrypted, i.last_modifier, i.status, i.type, f.file_count FROM \"\n                \"RepoInfo i LEFT JOIN RepoSize s ON i.repo_id = s.repo_id \"\n                \"LEFT JOIN Branch b ON i.repo_id = b.repo_id \"\n                \"LEFT JOIN RepoFileCount f ON i.repo_id = f.repo_id \"\n                \"LEFT JOIN Repo r ON i.repo_id = r.repo_id \"\n                \"LEFT JOIN VirtualRepo v ON i.repo_id = v.repo_id \"\n                \"WHERE r.repo_id IS NOT NULL \");\n            if (!ret_virt_repo)\n                g_string_append_printf (sql, \"AND v.repo_id IS NULL \");\n            if (g_strcmp0 
(order_by, \"size\") == 0)\n                g_string_append_printf (sql, \"ORDER BY s.size DESC, i.repo_id\");\n            else if (g_strcmp0 (order_by, \"file_count\") == 0)\n                g_string_append_printf (sql, \"ORDER BY f.file_count DESC, i.repo_id\");\n            else\n                g_string_append_printf (sql, \"ORDER BY i.update_time DESC, i.repo_id\");\n            break;\n        case SEAF_DB_TYPE_SQLITE:\n            g_string_append (sql, \"SELECT i.repo_id, s.size, b.commit_id, i.name, i.update_time, \"\n                \"i.version, i.is_encrypted, i.last_modifier, i.status, i.type, f.file_count FROM \"\n                \"RepoInfo i LEFT JOIN RepoSize s ON i.repo_id = s.repo_id \"\n                \"LEFT JOIN Branch b ON i.repo_id = b.repo_id \"\n                \"LEFT JOIN RepoFileCount f ON i.repo_id = f.repo_id \"\n                \"LEFT JOIN Repo r ON i.repo_id = r.repo_id \"\n                \"LEFT JOIN VirtualRepo v ON i.repo_id = v.repo_id \"\n                \"WHERE r.repo_id IS NOT NULL \");\n            if (!ret_virt_repo)\n                g_string_append_printf (sql, \"AND v.repo_id IS NULL \");\n            if (g_strcmp0 (order_by, \"size\") == 0)\n                g_string_append_printf (sql, \"ORDER BY s.size DESC, i.repo_id\");\n            else if (g_strcmp0 (order_by, \"file_count\") == 0)\n                g_string_append_printf (sql, \"ORDER BY f.file_count DESC, i.repo_id\");\n            else\n                g_string_append_printf (sql, \"ORDER BY i.update_time DESC, i.repo_id\");\n            break;\n        default:\n            g_string_free (sql, TRUE);\n            return NULL;\n        }\n\n        rc = seaf_db_statement_foreach_row (mgr->seaf->db, sql->str,\n                                            collect_repos_fill_size_commit, &ret,\n                                            0);\n    } else {\n        switch (seaf_db_type(mgr->seaf->db)) {\n        case SEAF_DB_TYPE_MYSQL:\n            g_string_append (sql, 
\"SELECT i.repo_id, s.size, b.commit_id, i.name, i.update_time, \"\n                \"i.version, i.is_encrypted, i.last_modifier, i.status, i.type, f.file_count FROM \"\n                \"RepoInfo i LEFT JOIN RepoSize s ON i.repo_id = s.repo_id \"\n                \"LEFT JOIN Branch b ON i.repo_id = b.repo_id \"\n                \"LEFT JOIN RepoFileCount f ON i.repo_id = f.repo_id \"\n                \"LEFT JOIN Repo r ON i.repo_id = r.repo_id \"\n                \"LEFT JOIN VirtualRepo v ON i.repo_id = v.repo_id \"\n                \"WHERE r.repo_id IS NOT NULL \");\n            if (!ret_virt_repo)\n                g_string_append_printf (sql, \"AND v.repo_id IS NULL \");\n            if (g_strcmp0 (order_by, \"size\") == 0)\n                g_string_append_printf (sql, \"ORDER BY s.size DESC, i.repo_id LIMIT ? OFFSET ?\");\n            else if (g_strcmp0 (order_by, \"file_count\") == 0)\n                g_string_append_printf (sql, \"ORDER BY f.file_count DESC, i.repo_id LIMIT ? OFFSET ?\");\n            else\n                g_string_append_printf (sql, \"ORDER BY i.update_time DESC, i.repo_id LIMIT ? 
OFFSET ?\");\n            break;\n        case SEAF_DB_TYPE_SQLITE:\n            g_string_append (sql, \"SELECT i.repo_id, s.size, b.commit_id, i.name, i.update_time, \"\n                \"i.version, i.is_encrypted, i.last_modifier, i.status, i.type, f.file_count FROM \"\n                \"RepoInfo i LEFT JOIN RepoSize s ON i.repo_id = s.repo_id \"\n                \"LEFT JOIN Branch b ON i.repo_id = b.repo_id \"\n                \"LEFT JOIN RepoFileCount f ON i.repo_id = f.repo_id \"\n                \"LEFT JOIN Repo r ON i.repo_id = r.repo_id \"\n                \"LEFT JOIN VirtualRepo v ON i.repo_id = v.repo_id \"\n                \"WHERE r.repo_id IS NOT NULL \");\n            if (!ret_virt_repo)\n                g_string_append_printf (sql, \"AND v.repo_id IS NULL \");\n            if (g_strcmp0 (order_by, \"size\") == 0)\n                g_string_append_printf (sql, \"ORDER BY s.size DESC, i.repo_id LIMIT ? OFFSET ?\");\n            else if (g_strcmp0 (order_by, \"file_count\") == 0)\n                g_string_append_printf (sql, \"ORDER BY f.file_count DESC, i.repo_id LIMIT ? OFFSET ?\");\n            else\n                g_string_append_printf (sql, \"ORDER BY i.update_time DESC, i.repo_id LIMIT ? 
OFFSET ?\");\n            break;\n        default:\n            g_string_free (sql, TRUE);\n            return NULL;\n        }\n\n        rc = seaf_db_statement_foreach_row (mgr->seaf->db, sql->str,\n                                            collect_repos_fill_size_commit, &ret,\n                                            2, \"int\", limit, \"int\", start);\n    }\n\n    g_string_free (sql, TRUE);\n\n    if (rc < 0)\n        return NULL;\n\n    return g_list_reverse (ret);\n}\n\ngint64\nseaf_repo_manager_count_repos (SeafRepoManager *mgr, GError **error)\n{\n    gint64 num = seaf_db_get_int64 (mgr->seaf->db,\n                                    \"SELECT COUNT(repo_id) FROM Repo\");\n    if (num < 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to count repos from db\");\n    }\n\n    return num;\n}\n\nGList *\nseaf_repo_manager_get_repo_ids_by_owner (SeafRepoManager *mgr,\n                                         const char *email)\n{\n    GList *ret = NULL;\n    char *sql;\n\n    sql = \"SELECT repo_id FROM RepoOwner WHERE owner_id=?\";\n\n    if (seaf_db_statement_foreach_row (mgr->seaf->db, sql, \n                                       collect_repo_id, &ret,\n                                       1, \"string\", email) < 0) {\n        string_list_free (ret);\n        return NULL;\n    }\n\n    return ret;\n}\n\nstatic gboolean\ncollect_trash_repo (SeafDBRow *row, void *data)\n{\n    GList **trash_repos = data;\n    const char *repo_id;\n    const char *repo_name;\n    const char *head_id;\n    const char *owner_id;\n    gint64 size;\n    gint64 del_time;\n\n    repo_id = seaf_db_row_get_column_text (row, 0);\n    repo_name = seaf_db_row_get_column_text (row, 1);\n    head_id = seaf_db_row_get_column_text (row, 2);\n    owner_id = seaf_db_row_get_column_text (row, 3);\n    size = seaf_db_row_get_column_int64 (row, 4);\n    del_time = seaf_db_row_get_column_int64 (row, 5);\n\n\n    if (!repo_id || !repo_name 
|| !head_id || !owner_id)\n        return TRUE;\n\n    SeafileTrashRepo *trash_repo = g_object_new (SEAFILE_TYPE_TRASH_REPO,\n                                                 \"repo_id\", repo_id,\n                                                 \"repo_name\", repo_name,\n                                                 \"head_id\", head_id,\n                                                 \"owner_id\", owner_id,\n                                                 \"size\", size,\n                                                 \"del_time\", del_time,\n                                                 NULL);\n    if (!trash_repo)\n        return FALSE;\n\n    SeafCommit *commit = seaf_commit_manager_get_commit_compatible (seaf->commit_mgr,\n                                                                    repo_id, head_id);\n    if (!commit) {\n        seaf_warning (\"Commit %s not found in repo %s\\n\", head_id, repo_id);\n        g_object_unref (trash_repo);\n        return TRUE;\n    }\n    g_object_set (trash_repo, \"encrypted\", commit->encrypted, NULL);\n    seaf_commit_unref (commit);\n\n    *trash_repos = g_list_prepend (*trash_repos, trash_repo);\n\n    return TRUE;\n}\n\nGList *\nseaf_repo_manager_get_trash_repo_list (SeafRepoManager *mgr,\n                                       int start,\n                                       int limit,\n                                       GError **error)\n{\n    GList *trash_repos = NULL;\n    int rc;\n\n    if (start == -1 && limit == -1)\n        rc = seaf_db_statement_foreach_row (mgr->seaf->db,\n                                            \"SELECT repo_id, repo_name, head_id, owner_id, \"\n                                            \"size, del_time FROM RepoTrash ORDER BY del_time DESC\",\n                                            collect_trash_repo, &trash_repos,\n                                            0);\n    else\n        rc = seaf_db_statement_foreach_row (mgr->seaf->db,\n                         
                   \"SELECT repo_id, repo_name, head_id, owner_id, \"\n                                            \"size, del_time FROM RepoTrash \"\n                                            \"ORDER BY del_time DESC LIMIT ? OFFSET ?\",\n                                            collect_trash_repo, &trash_repos,\n                                            2, \"int\", limit, \"int\", start);\n\n    if (rc < 0) {\n        while (trash_repos) {\n            g_object_unref (trash_repos->data);\n            trash_repos = g_list_delete_link (trash_repos, trash_repos);\n        }\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to get trashed repo from db.\");\n        return NULL;\n    }\n\n    return g_list_reverse (trash_repos);\n}\n\nGList *\nseaf_repo_manager_get_trash_repos_by_owner (SeafRepoManager *mgr,\n                                            const char *owner,\n                                            GError **error)\n{\n    GList *trash_repos = NULL;\n    int rc;\n\n    rc = seaf_db_statement_foreach_row (mgr->seaf->db,\n                                        \"SELECT repo_id, repo_name, head_id, owner_id, \"\n                                        \"size, del_time FROM RepoTrash WHERE owner_id = ?\",\n                                        collect_trash_repo, &trash_repos,\n                                        1, \"string\", owner);\n\n    if (rc < 0) {\n        while (trash_repos) {\n            g_object_unref (trash_repos->data);\n            trash_repos = g_list_delete_link (trash_repos, trash_repos);\n        }\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to get trashed repo from db.\");\n        return NULL;\n    }\n\n    return trash_repos;\n}\n\nSeafileTrashRepo *\nseaf_repo_manager_get_repo_from_trash (SeafRepoManager *mgr,\n                                       const char *repo_id)\n{\n    SeafileTrashRepo *ret = NULL;\n    GList 
*trash_repos = NULL;\n    char *sql;\n    int rc;\n\n    sql = \"SELECT repo_id, repo_name, head_id, owner_id, size, del_time FROM RepoTrash \"\n        \"WHERE repo_id = ?\";\n    rc = seaf_db_statement_foreach_row (mgr->seaf->db, sql,\n                                        collect_trash_repo, &trash_repos,\n                                        1, \"string\", repo_id);\n    if (rc < 0)\n        return NULL;\n\n    /* There should be only one results, since repo_id is a PK. */\n    if (trash_repos)\n        ret = trash_repos->data;\n\n    g_list_free (trash_repos);\n    return ret;\n}\n\nint\nseaf_repo_manager_del_repo_from_trash (SeafRepoManager *mgr,\n                                       const char *repo_id,\n                                       GError **error)\n{\n    /* As long as the repo is successfully moved into GarbageRepo table,\n     * we consider this operation successful.\n     */\n    if (add_deleted_repo_record (mgr, repo_id) < 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"DB error: Add deleted record\");\n        return -1;\n    }\n\n    seaf_db_statement_query (mgr->seaf->db,\n                             \"DELETE FROM RepoFileCount WHERE repo_id = ?\",\n                             1, \"string\", repo_id);\n\n    seaf_db_statement_query (mgr->seaf->db,\n                             \"DELETE FROM RepoTrash WHERE repo_id = ?\",\n                             1, \"string\", repo_id);\n\n    seaf_db_statement_query (mgr->seaf->db,\n                             \"DELETE FROM RepoInfo WHERE repo_id = ?\",\n                             1, \"string\", repo_id);\n\n    return 0;\n}\n\nint\nseaf_repo_manager_empty_repo_trash (SeafRepoManager *mgr, GError **error)\n{\n    GList *trash_repos = NULL, *ptr;\n    SeafileTrashRepo *repo;\n\n    trash_repos = seaf_repo_manager_get_trash_repo_list (mgr, -1, -1, error);\n    if (*error)\n        return -1;\n\n    for (ptr = trash_repos; ptr; ptr = ptr->next) {\n  
      repo = ptr->data;\n        seaf_repo_manager_del_repo_from_trash (mgr,\n                                               seafile_trash_repo_get_repo_id(repo),\n                                               NULL);\n        g_object_unref (repo);\n    }\n\n    g_list_free (trash_repos);\n    return 0;\n}\n\nint\nseaf_repo_manager_empty_repo_trash_by_owner (SeafRepoManager *mgr,\n                                             const char *owner,\n                                             GError **error)\n{\n    GList *trash_repos = NULL, *ptr;\n    SeafileTrashRepo *repo;\n\n    trash_repos = seaf_repo_manager_get_trash_repos_by_owner (mgr, owner, error);\n    if (*error)\n        return -1;\n\n    for (ptr = trash_repos; ptr; ptr = ptr->next) {\n        repo = ptr->data;\n        seaf_repo_manager_del_repo_from_trash (mgr,\n                                               seafile_trash_repo_get_repo_id(repo),\n                                               NULL);\n        g_object_unref (repo);\n    }\n\n    g_list_free (trash_repos);\n    return 0;\n}\n\nint\nseaf_repo_manager_restore_repo_from_trash (SeafRepoManager *mgr,\n                                           const char *repo_id,\n                                           GError **error)\n{\n    SeafileTrashRepo *repo = NULL;\n    int ret = 0;\n    gboolean exists = FALSE;\n    gboolean db_err;\n    const char *head_id = NULL;\n    SeafCommit *commit = NULL;\n\n    repo = seaf_repo_manager_get_repo_from_trash (mgr, repo_id);\n    if (!repo) {\n        seaf_warning (\"Repo %.8s not found in trash.\\n\", repo_id);\n        return -1;\n    }\n\n    SeafDBTrans *trans = seaf_db_begin_transaction (mgr->seaf->db);\n\n    exists = seaf_db_trans_check_for_existence (trans,\n                                                \"SELECT 1 FROM Repo WHERE repo_id=?\",\n                                                &db_err, 1, \"string\", repo_id);\n\n    if (!exists) {\n        ret = seaf_db_trans_query (trans,\n       
                            \"INSERT INTO Repo(repo_id) VALUES (?)\",\n                                   1, \"string\", repo_id) < 0;\n        if (ret < 0) {\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                         \"DB error: Insert Repo.\");\n            seaf_db_rollback (trans);\n            seaf_db_trans_close (trans);\n            goto out;\n        }\n    }\n\n    exists = seaf_db_trans_check_for_existence (trans,\n                                                \"SELECT 1 FROM RepoOwner WHERE repo_id=?\",\n                                                &db_err, 1, \"string\", repo_id);\n\n    if (!exists) {\n        ret = seaf_db_trans_query (trans,\n                                   \"INSERT INTO RepoOwner (repo_id, owner_id) VALUES (?, ?)\",\n                                   2, \"string\", repo_id,\n                                   \"string\", seafile_trash_repo_get_owner_id(repo));\n        if (ret < 0) {\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                         \"DB error: Insert Repo Owner.\");\n            seaf_db_rollback (trans);\n            seaf_db_trans_close (trans);\n            goto out;\n        }\n    }\n\n    exists = seaf_db_trans_check_for_existence (trans,\n                                                \"SELECT 1 FROM Branch WHERE repo_id=?\",\n                                                &db_err, 1, \"string\", repo_id);\n    if (!exists) {\n        ret = seaf_db_trans_query (trans,\n                                   \"INSERT INTO Branch (name, repo_id, commit_id) VALUES ('master', ?, ?)\",\n                                   2, \"string\", repo_id,\n                                   \"string\", seafile_trash_repo_get_head_id(repo));\n        if (ret < 0) {\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                         \"DB error: Insert Branch.\");\n            seaf_db_rollback (trans);\n            seaf_db_trans_close 
(trans);\n            goto out;\n        }\n    }\n\n    exists = seaf_db_trans_check_for_existence (trans,\n                                                \"SELECT 1 FROM RepoHead WHERE repo_id=?\",\n                                                &db_err, 1, \"string\", repo_id);\n    if (!exists) {\n        ret = seaf_db_trans_query (trans,\n                                   \"INSERT INTO RepoHead (repo_id, branch_name) VALUES (?, 'master')\",\n                                   1, \"string\", repo_id);\n        if (ret < 0) {\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                         \"DB error: Set RepoHead.\");\n            seaf_db_rollback (trans);\n            seaf_db_trans_close (trans);\n            goto out;\n        }\n    }\n\n    // Restore repo size\n    exists = seaf_db_trans_check_for_existence (trans,\n                                                \"SELECT 1 FROM RepoSize WHERE repo_id=?\",\n                                                &db_err, 1, \"string\", repo_id);\n\n    if (!exists) {\n        ret = seaf_db_trans_query (trans,\n                                   \"INSERT INTO RepoSize (repo_id, size, head_id) VALUES (?, ?, ?)\",\n                                   3, \"string\", repo_id,\n                                   \"int64\", seafile_trash_repo_get_size (repo),\n                                   \"string\", seafile_trash_repo_get_head_id (repo));\n        if (ret < 0) {\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                         \"DB error: Insert Repo Size.\");\n            seaf_db_rollback (trans);\n            seaf_db_trans_close (trans);\n            goto out;\n        }\n    }\n\n    // Restore repo info\n    exists = seaf_db_trans_check_for_existence (trans,\n                                                \"SELECT 1 FROM RepoInfo WHERE repo_id=?\",\n                                                &db_err, 1, \"string\", repo_id);\n\n    if (!exists) {\n 
       head_id = seafile_trash_repo_get_head_id (repo);\n        commit = seaf_commit_manager_get_commit_compatible (seaf->commit_mgr,\n                                                            repo_id, head_id);\n        if (!commit) {\n            seaf_warning (\"Commit %.8s of repo %.8s not found.\\n\", repo_id, head_id);\n            seaf_db_rollback (trans);\n            seaf_db_trans_close (trans);\n            ret = -1;\n            goto out;\n        }\n        ret = seaf_db_trans_query (trans,\n                                   \"INSERT INTO RepoInfo (repo_id, name, update_time, version, is_encrypted, last_modifier) VALUES (?, ?, ?, ?, ?, ?)\",\n                                   6, \"string\", repo_id,\n                                   \"string\", seafile_trash_repo_get_repo_name (repo),\n                                   \"int64\", commit->ctime,\n                                   \"int\", commit->version,\n                                   \"int\", commit->encrypted,\n                                   \"string\", commit->creator_name);\n        if (ret < 0) {\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                         \"DB error: Insert Repo Info.\");\n            seaf_db_rollback (trans);\n            seaf_db_trans_close (trans);\n            goto out;\n        }\n    }\n\n    ret = seaf_db_trans_query (trans,\n                               \"DELETE FROM RepoTrash WHERE repo_id = ?\",\n                               1, \"string\", repo_id);\n    if (ret < 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"DB error: delete from RepoTrash.\");\n        seaf_db_rollback (trans);\n        seaf_db_trans_close (trans);\n        goto out;\n    }\n\n    if (seaf_db_commit (trans) < 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"DB error: Failed to commit.\");\n        seaf_db_rollback (trans);\n        ret = -1;\n    }\n\n    
seaf_db_trans_close (trans);\n\nout:\n    seaf_commit_unref (commit);\n    g_object_unref (repo);\n    return ret;\n}\n\n/* Web access permission. */\n\nint\nseaf_repo_manager_set_access_property (SeafRepoManager *mgr, const char *repo_id,\n                                       const char *ap)\n{\n    int rc;\n\n    if (seaf_repo_manager_query_access_property (mgr, repo_id) == NULL) {\n        rc = seaf_db_statement_query (mgr->seaf->db,\n                                      \"INSERT INTO WebAP (repo_id, access_property) VALUES (?, ?)\",\n                                      2, \"string\", repo_id, \"string\", ap);\n    } else {\n        rc = seaf_db_statement_query (mgr->seaf->db,\n                                      \"UPDATE WebAP SET access_property=? \"\n                                      \"WHERE repo_id=?\",\n                                      2, \"string\", ap, \"string\", repo_id);\n    }\n\n    if (rc < 0) {\n        seaf_warning (\"DB error when set access property for repo %s, %s.\\n\", repo_id, ap);\n        return -1;\n    }\n    \n    return 0;\n}\n\nstatic gboolean\nget_ap (SeafDBRow *row, void *data)\n{\n    char **ap = data;\n\n    *ap = g_strdup (seaf_db_row_get_column_text (row, 0));\n\n    return FALSE;\n}\n\nchar *\nseaf_repo_manager_query_access_property (SeafRepoManager *mgr, const char *repo_id)\n{\n    char *sql;\n    char *ret = NULL;\n\n    sql =  \"SELECT access_property FROM WebAP WHERE repo_id=?\";\n \n    if (seaf_db_statement_foreach_row (mgr->seaf->db, sql, get_ap, &ret,\n                                       1, \"string\", repo_id) < 0) {\n        seaf_warning (\"DB error when get access property for repo %s.\\n\", repo_id);\n        return NULL;\n    }\n\n    return ret;\n}\n\n/* Group repos. 
*/\n\nint\nseaf_repo_manager_add_group_repo (SeafRepoManager *mgr,\n                                  const char *repo_id,\n                                  int group_id,\n                                  const char *owner,\n                                  const char *permission,\n                                  GError **error)\n{\n    if (seaf_db_statement_query (mgr->seaf->db,\n                                 \"INSERT INTO RepoGroup (repo_id, group_id, user_name, permission) VALUES (?, ?, ?, ?)\",\n                                 4, \"string\", repo_id, \"int\", group_id,\n                                 \"string\", owner, \"string\", permission) < 0)\n        return -1;\n\n    return 0;\n}\n\nint\nseaf_repo_manager_del_group_repo (SeafRepoManager *mgr,\n                                  const char *repo_id,\n                                  int group_id,\n                                  GError **error)\n{\n    return seaf_db_statement_query (mgr->seaf->db,\n                                    \"DELETE FROM RepoGroup WHERE group_id=? 
\"\n                                    \"AND repo_id=?\",\n                                    2, \"int\", group_id, \"string\", repo_id);\n}\n\nstatic gboolean\nget_group_ids_cb (SeafDBRow *row, void *data)\n{\n    GList **plist = data;\n\n    int group_id = seaf_db_row_get_column_int (row, 0);\n\n    *plist = g_list_prepend (*plist, (gpointer)(long)group_id);\n\n    return TRUE;\n}\n\nGList *\nseaf_repo_manager_get_groups_by_repo (SeafRepoManager *mgr,\n                                      const char *repo_id,\n                                      GError **error)\n{\n    char *sql;\n    GList *group_ids = NULL;\n    \n    sql =  \"SELECT group_id FROM RepoGroup WHERE repo_id = ?\";\n    \n    if (seaf_db_statement_foreach_row (mgr->seaf->db, sql, get_group_ids_cb,\n                                       &group_ids, 1, \"string\", repo_id) < 0) {\n        g_list_free (group_ids);\n        return NULL;\n    }\n\n    return g_list_reverse (group_ids);\n}\n\nstatic gboolean\nget_group_perms_cb (SeafDBRow *row, void *data)\n{\n    GList **plist = data;\n    GroupPerm *perm = g_new0 (GroupPerm, 1);\n\n    perm->group_id = seaf_db_row_get_column_int (row, 0);\n    const char *permission = seaf_db_row_get_column_text(row, 1);\n    g_strlcpy (perm->permission, permission, sizeof(perm->permission));\n\n    *plist = g_list_prepend (*plist, perm);\n\n    return TRUE;\n}\n\nGList *\nseaf_repo_manager_get_group_perm_by_repo (SeafRepoManager *mgr,\n                                          const char *repo_id,\n                                          GError **error)\n{\n    char *sql;\n    GList *group_perms = NULL, *p;\n    \n    sql = \"SELECT group_id, permission FROM RepoGroup WHERE repo_id = ?\";\n    \n    if (seaf_db_statement_foreach_row (mgr->seaf->db, sql, get_group_perms_cb,\n                                       &group_perms, 1, \"string\", repo_id) < 0) {\n        for (p = group_perms; p != NULL; p = p->next)\n            g_free (p->data);\n        
g_list_free (group_perms);\n        return NULL;\n    }\n\n    return g_list_reverse (group_perms);\n}\n\nint\nseaf_repo_manager_set_group_repo_perm (SeafRepoManager *mgr,\n                                       const char *repo_id,\n                                       int group_id,\n                                       const char *permission,\n                                       GError **error)\n{\n    return seaf_db_statement_query (mgr->seaf->db,\n                                    \"UPDATE RepoGroup SET permission=? WHERE \"\n                                    \"repo_id=? AND group_id=?\",\n                                    3, \"string\", permission, \"string\", repo_id,\n                                    \"int\", group_id);\n}\n\nint\nseaf_repo_manager_set_subdir_group_perm_by_path (SeafRepoManager *mgr,\n                                                 const char *repo_id,\n                                                 const char *username,\n                                                 int group_id,\n                                                 const char *permission,\n                                                 const char *path)\n{\n    return seaf_db_statement_query (mgr->seaf->db,\n                                    \"UPDATE RepoGroup SET permission=? WHERE repo_id IN \"\n                                    \"(SELECT repo_id FROM VirtualRepo WHERE origin_repo=? AND path=?) \"\n                                    \"AND group_id=? 
AND user_name=?\",\n                                    5, \"string\", permission,\n                                    \"string\", repo_id,\n                                    \"string\", path,\n                                    \"int\", group_id,\n                                    \"string\", username);\n}\nstatic gboolean\nget_group_repoids_cb (SeafDBRow *row, void *data)\n{\n    GList **p_list = data;\n\n    char *repo_id = g_strdup ((const char *)seaf_db_row_get_column_text (row, 0));\n\n    *p_list = g_list_prepend (*p_list, repo_id);\n\n    return TRUE;\n}\n\nGList *\nseaf_repo_manager_get_group_repoids (SeafRepoManager *mgr,\n                                     int group_id,\n                                     GError **error)\n{\n    char *sql;\n    GList *repo_ids = NULL;\n\n    sql =  \"SELECT repo_id FROM RepoGroup WHERE group_id = ?\";\n    if (seaf_db_statement_foreach_row (mgr->seaf->db, sql, get_group_repoids_cb,\n                                       &repo_ids, 1, \"int\", group_id) < 0)\n        return NULL;\n\n    return g_list_reverse (repo_ids);\n}\n\nstatic gboolean\nget_group_repos_cb (SeafDBRow *row, void *data)\n{\n    GList **p_list = data;\n    SeafileRepo *srepo = NULL;\n\n    const char *repo_id = seaf_db_row_get_column_text (row, 0);\n    const char *vrepo_id = seaf_db_row_get_column_text (row, 1);\n    int group_id = seaf_db_row_get_column_int (row, 2);\n    const char *user_name = seaf_db_row_get_column_text (row, 3);\n    const char *permission = seaf_db_row_get_column_text (row, 4);\n    const char *commit_id = seaf_db_row_get_column_text (row, 5);\n    gint64 size = seaf_db_row_get_column_int64 (row, 6);\n    const char *repo_name = seaf_db_row_get_column_text (row, 9);\n    gint64 update_time = seaf_db_row_get_column_int64 (row, 10);\n    int version = seaf_db_row_get_column_int (row, 11);\n    gboolean is_encrypted = seaf_db_row_get_column_int (row, 12) ? 
TRUE : FALSE;\n    const char *last_modifier = seaf_db_row_get_column_text (row, 13);\n    int status = seaf_db_row_get_column_int (row, 14);\n    const char *type = seaf_db_row_get_column_text (row, 15);\n\n    char *user_name_l = g_ascii_strdown (user_name, -1);\n\n    srepo = g_object_new (SEAFILE_TYPE_REPO,\n                          \"share_type\", \"group\",\n                          \"repo_id\", repo_id,\n                          \"id\", repo_id,\n                          \"head_cmmt_id\", commit_id,\n                          \"group_id\", group_id,\n                          \"user\", user_name_l,\n                          \"permission\", permission,\n                          \"is_virtual\", (vrepo_id != NULL),\n                          \"size\", size,\n                          \"status\", status,\n                          NULL);\n    g_free (user_name_l);\n\n    if (srepo != NULL) {\n        if (vrepo_id) {\n            const char *origin_repo_id = seaf_db_row_get_column_text (row, 7);\n            const char *origin_path = seaf_db_row_get_column_text (row, 8);\n            const char *origin_repo_name = seaf_db_row_get_column_text (row, 16);\n            g_object_set (srepo, \"store_id\", origin_repo_id,\n                          \"origin_repo_id\", origin_repo_id,\n                          \"origin_repo_name\", origin_repo_name,\n                          \"origin_path\", origin_path, NULL);\n        } else {\n            g_object_set (srepo, \"store_id\", repo_id, NULL);\n        }\n        if (repo_name) {\n            g_object_set (srepo, \"name\", repo_name,\n                          \"repo_name\", repo_name,\n                          \"last_modify\", update_time,\n                          \"last_modified\", update_time,\n                          \"version\", version,\n                          \"encrypted\", is_encrypted,\n                          \"last_modifier\", last_modifier, NULL);\n        }\n        if (type) {\n            
g_object_set (srepo, \"repo_type\", type, NULL);\n        }\n        *p_list = g_list_prepend (*p_list, srepo);\n    }\n\n    return TRUE;\n}\n\nvoid\nseaf_fill_repo_obj_from_commit (GList **repos)\n{\n    SeafileRepo *repo;\n    SeafCommit *commit;\n    char *repo_id;\n    char *commit_id;\n    char *repo_name = NULL;\n    char *last_modifier = NULL;\n    GList *p = *repos;\n    GList *next;\n\n    while (p) {\n        repo = p->data;\n        g_object_get (repo, \"name\", &repo_name, NULL);\n        g_object_get (repo, \"last_modifier\", &last_modifier, NULL);\n        if (!repo_name || !last_modifier) {\n            g_object_get (repo, \"repo_id\", &repo_id, \"head_cmmt_id\", &commit_id, NULL);\n            commit = seaf_commit_manager_get_commit_compatible (seaf->commit_mgr,\n                                                                repo_id, commit_id);\n            if (!commit) {\n                seaf_warning (\"Commit %s not found in repo %s\\n\", commit_id, repo_id);\n                g_object_unref (repo);\n                next = p->next;\n                *repos = g_list_delete_link (*repos, p);\n                p = next;\n                if (repo_name)\n                    g_free (repo_name);\n                if (last_modifier)\n                    g_free (last_modifier);\n            } else {\n                g_object_set (repo, \"name\", commit->repo_name,\n                              \"repo_name\", commit->repo_name,\n                              \"last_modify\", commit->ctime,\n                              \"last_modified\", commit->ctime,\n                              \"version\", commit->version,\n                              \"encrypted\", commit->encrypted,\n                              \"last_modifier\", commit->creator_name,\n                              NULL);\n\n                /* Set to database */\n                set_repo_commit_to_db (repo_id, commit->repo_name, commit->ctime, commit->version,\n                                  
     commit->encrypted, commit->creator_name);\n                seaf_commit_unref (commit);\n            }\n            g_free (repo_id);\n            g_free (commit_id);\n        }\n        if (repo_name)\n            g_free (repo_name);\n        if (last_modifier)\n            g_free (last_modifier);\n\n        p = p->next;\n    }\n}\n\nGList *\nseaf_repo_manager_get_repos_by_group (SeafRepoManager *mgr,\n                                      int group_id,\n                                      GError **error)\n{\n    char *sql;\n    GList *repos = NULL;\n    GList *p;\n\n    sql = \"SELECT RepoGroup.repo_id, v.repo_id, \"\n        \"group_id, user_name, permission, commit_id, s.size, \"\n        \"v.origin_repo, v.path, i.name, \"\n        \"i.update_time, i.version, i.is_encrypted, i.last_modifier, i.status, i.type, i2.name \"\n        \"FROM RepoGroup LEFT JOIN VirtualRepo v ON \"\n        \"RepoGroup.repo_id = v.repo_id \"\n        \"LEFT JOIN RepoInfo i ON RepoGroup.repo_id = i.repo_id \"\n        \"LEFT JOIN RepoInfo i2 ON v.origin_repo = i2.repo_id \"\n        \"LEFT JOIN RepoSize s ON RepoGroup.repo_id = s.repo_id, \"\n        \"Branch WHERE group_id = ? 
AND \"\n        \"RepoGroup.repo_id = Branch.repo_id AND \"\n        \"Branch.name = 'master'\";\n\n    if (seaf_db_statement_foreach_row (mgr->seaf->db, sql, get_group_repos_cb,\n                                       &repos, 1, \"int\", group_id) < 0) {\n        for (p = repos; p; p = p->next) {\n            g_object_unref (p->data);\n        }\n        g_list_free (repos);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to get repos by group from db.\");\n        return NULL;\n    }\n\n    seaf_fill_repo_obj_from_commit (&repos);\n\n    return g_list_reverse (repos);\n}\n\nGList *\nseaf_repo_manager_get_group_repos_by_owner (SeafRepoManager *mgr,\n                                            const char *owner,\n                                            GError **error)\n{\n    char *sql;\n    GList *repos = NULL;\n    GList *p;\n\n    sql = \"SELECT RepoGroup.repo_id, v.repo_id, \"\n        \"group_id, user_name, permission, commit_id, s.size, \"\n        \"v.origin_repo, v.path, i.name, \"\n        \"i.update_time, i.version, i.is_encrypted, i.last_modifier, i.status, i.type, i2.name \"\n        \"FROM RepoGroup LEFT JOIN VirtualRepo v ON \"\n        \"RepoGroup.repo_id = v.repo_id \"\n        \"LEFT JOIN RepoInfo i ON RepoGroup.repo_id = i.repo_id \"\n        \"LEFT JOIN RepoInfo i2 ON v.origin_repo = i2.repo_id \"\n        \"LEFT JOIN RepoSize s ON RepoGroup.repo_id = s.repo_id, \"\n        \"Branch WHERE user_name = ? 
AND \"\n        \"RepoGroup.repo_id = Branch.repo_id AND \"\n        \"Branch.name = 'master'\";\n    if (seaf_db_statement_foreach_row (mgr->seaf->db, sql, get_group_repos_cb,\n                                       &repos, 1, \"string\", owner) < 0) {\n        for (p = repos; p; p = p->next) {\n            g_object_unref (p->data);\n        }\n        g_list_free (repos);\n        return NULL;\n    }\n\n    seaf_fill_repo_obj_from_commit (&repos);\n\n    return g_list_reverse (repos);\n}\n\nstatic gboolean\nget_group_repo_owner (SeafDBRow *row, void *data)\n{\n    char **share_from = data;\n\n    const char *owner = (const char *) seaf_db_row_get_column_text (row, 0);\n    *share_from = g_ascii_strdown (owner, -1);\n    /* There should be only one result. */\n    return FALSE;\n}\n\nchar *\nseaf_repo_manager_get_group_repo_owner (SeafRepoManager *mgr,\n                                        const char *repo_id,\n                                        GError **error)\n{\n    char *sql;\n    char *ret = NULL;\n\n    sql = \"SELECT user_name FROM RepoGroup WHERE repo_id = ?\";\n    if (seaf_db_statement_foreach_row (mgr->seaf->db, sql,\n                                       get_group_repo_owner, &ret,\n                                       1, \"string\", repo_id) < 0) {\n        seaf_warning (\"DB error when get repo share from for repo %s.\\n\",\n                   repo_id);\n        return NULL;\n    }\n\n    return ret;\n}\n\nint\nseaf_repo_manager_remove_group_repos (SeafRepoManager *mgr,\n                                      int group_id,\n                                      const char *owner,\n                                      GError **error)\n{\n    SeafDB *db = mgr->seaf->db;\n    int rc;\n\n    if (!owner) {\n        rc = seaf_db_statement_query (db, \"DELETE FROM RepoGroup WHERE group_id=?\",\n                                      1, \"int\", group_id);\n    } else {\n        rc = seaf_db_statement_query (db,\n                                    
  \"DELETE FROM RepoGroup WHERE group_id=? AND \"\n                                      \"user_name = ?\",\n                                      2, \"int\", group_id, \"string\", owner);\n    }\n\n    return rc;\n}\n\n/* Inner public repos */\n\nint\nseaf_repo_manager_set_inner_pub_repo (SeafRepoManager *mgr,\n                                      const char *repo_id,\n                                      const char *permission)\n{\n    SeafDB *db = mgr->seaf->db;\n    char sql[256];\n\n    if (seaf_db_type(db) == SEAF_DB_TYPE_PGSQL) {\n        gboolean err;\n        snprintf(sql, sizeof(sql),\n                 \"SELECT repo_id FROM InnerPubRepo WHERE repo_id=?\");\n        if (seaf_db_statement_exists (db, sql, &err,\n                                      1, \"string\", repo_id))\n            snprintf(sql, sizeof(sql),\n                     \"UPDATE InnerPubRepo SET permission='%s' \"\n                     \"WHERE repo_id='%s'\", permission, repo_id);\n        else\n            snprintf(sql, sizeof(sql),\n                     \"INSERT INTO InnerPubRepo (repo_id, permission) VALUES \"\n                     \"('%s', '%s')\", repo_id, permission);\n        if (err)\n            return -1;\n        return seaf_db_query (db, sql);\n    } else {\n        return seaf_db_statement_query (db,\n                                        \"REPLACE INTO InnerPubRepo (repo_id, permission) VALUES (?, ?)\",\n                                        2, \"string\", repo_id, \"string\", permission);\n    }\n\n    return -1;\n}\n\nint\nseaf_repo_manager_unset_inner_pub_repo (SeafRepoManager *mgr,\n                                        const char *repo_id)\n{\n    return seaf_db_statement_query (mgr->seaf->db,\n                                    \"DELETE FROM InnerPubRepo WHERE repo_id = ?\",\n                                    1, \"string\", repo_id);\n}\n\ngboolean\nseaf_repo_manager_is_inner_pub_repo (SeafRepoManager *mgr,\n                                     const char 
*repo_id)\n{\n    gboolean db_err = FALSE;\n\n    return seaf_db_statement_exists (mgr->seaf->db,\n                                     \"SELECT repo_id FROM InnerPubRepo WHERE repo_id=?\",\n                                     &db_err, 1, \"string\", repo_id);\n}\n\nstatic gboolean\ncollect_public_repos (SeafDBRow *row, void *data)\n{\n    GList **ret = (GList **)data;\n    SeafileRepo *srepo;\n    const char *repo_id, *vrepo_id, *owner, *permission, *commit_id;\n    gint64 size;\n\n    repo_id = seaf_db_row_get_column_text (row, 0);\n    vrepo_id = seaf_db_row_get_column_text (row, 1);\n    owner = seaf_db_row_get_column_text (row, 2);\n    permission = seaf_db_row_get_column_text (row, 3);\n    commit_id = seaf_db_row_get_column_text (row, 4);\n    size = seaf_db_row_get_column_int64 (row, 5);\n    const char *repo_name = seaf_db_row_get_column_text (row, 8);\n    gint64 update_time = seaf_db_row_get_column_int64 (row, 9);\n    int version = seaf_db_row_get_column_int (row, 10);\n    gboolean is_encrypted = seaf_db_row_get_column_int (row, 11) ? 
TRUE : FALSE;\n    const char *last_modifier = seaf_db_row_get_column_text (row, 12);\n    int status = seaf_db_row_get_column_int (row, 13);\n    const char *type = seaf_db_row_get_column_text (row, 14);\n\n    char *owner_l = g_ascii_strdown (owner, -1);\n\n    srepo = g_object_new (SEAFILE_TYPE_REPO,\n                          \"share_type\", \"public\",\n                          \"repo_id\", repo_id,\n                          \"id\", repo_id,\n                          \"head_cmmt_id\", commit_id,\n                          \"permission\", permission,\n                          \"user\", owner_l,\n                          \"is_virtual\", (vrepo_id != NULL),\n                          \"size\", size,\n                          \"status\", status,\n                          NULL);\n    g_free (owner_l);\n\n    if (srepo) {\n        if (vrepo_id) {\n            const char *origin_repo_id = seaf_db_row_get_column_text (row, 6);\n            const char *origin_path = seaf_db_row_get_column_text (row, 7);\n            g_object_set (srepo, \"store_id\", origin_repo_id,\n                          \"origin_repo_id\", origin_repo_id,\n                          \"origin_path\", origin_path, NULL);\n        } else {\n            g_object_set (srepo, \"store_id\", repo_id, NULL);\n        }\n\n        if (repo_name) {\n            g_object_set (srepo, \"name\", repo_name,\n                          \"repo_name\", repo_name,\n                          \"last_modify\", update_time,\n                          \"last_modified\", update_time,\n                          \"version\", version,\n                          \"encrypted\", is_encrypted,\n                          \"last_modifier\", last_modifier, NULL);\n        }\n        if (type) {\n            g_object_set (srepo, \"repo_type\", type, NULL);\n        }\n\n        *ret = g_list_prepend (*ret, srepo);\n    }\n\n    return TRUE;\n}\n\nGList *\nseaf_repo_manager_list_inner_pub_repos (SeafRepoManager *mgr, gboolean 
*db_err)\n{\n    GList *ret = NULL, *p;\n    char *sql;\n\n    sql = \"SELECT InnerPubRepo.repo_id, VirtualRepo.repo_id, \"\n        \"owner_id, permission, commit_id, s.size, \"\n        \"VirtualRepo.origin_repo, VirtualRepo.path, i.name, \"\n        \"i.update_time, i.version, i.is_encrypted, i.last_modifier, i.status, i.type \"\n        \"FROM InnerPubRepo LEFT JOIN VirtualRepo ON \"\n        \"InnerPubRepo.repo_id=VirtualRepo.repo_id \"\n        \"LEFT JOIN RepoInfo i ON InnerPubRepo.repo_id = i.repo_id \"\n        \"LEFT JOIN RepoSize s ON InnerPubRepo.repo_id = s.repo_id, RepoOwner, Branch \"\n        \"WHERE InnerPubRepo.repo_id=RepoOwner.repo_id AND \"\n        \"InnerPubRepo.repo_id = Branch.repo_id AND Branch.name = 'master'\";\n\n    if (seaf_db_statement_foreach_row (mgr->seaf->db, sql,\n                                       collect_public_repos, &ret,\n                                       0) < 0) {\n        for (p = ret; p != NULL; p = p->next)\n            g_object_unref (p->data);\n        g_list_free (ret);\n        if (db_err)\n            *db_err = TRUE;\n        return NULL;\n    }\n\n    seaf_fill_repo_obj_from_commit (&ret);\n\n    return g_list_reverse (ret);\n}\n\ngint64\nseaf_repo_manager_count_inner_pub_repos (SeafRepoManager *mgr)\n{\n    char sql[256];\n\n    snprintf (sql, 256, \"SELECT COUNT(*) FROM InnerPubRepo\");\n\n    return seaf_db_get_int64(mgr->seaf->db, sql);\n}\n\nGList *\nseaf_repo_manager_list_inner_pub_repos_by_owner (SeafRepoManager *mgr,\n                                                 const char *user)\n{\n    GList *ret = NULL, *p;\n    char *sql;\n\n    sql = \"SELECT InnerPubRepo.repo_id, VirtualRepo.repo_id, \"\n        \"owner_id, permission, commit_id, s.size, \"\n        \"VirtualRepo.origin_repo, VirtualRepo.path, i.name, \"\n        \"i.update_time, i.version, i.is_encrypted, i.last_modifier, i.status, i.type \"\n        \"FROM InnerPubRepo LEFT JOIN VirtualRepo ON \"\n        
\"InnerPubRepo.repo_id=VirtualRepo.repo_id \"\n        \"LEFT JOIN RepoInfo i ON InnerPubRepo.repo_id = i.repo_id \"\n        \"LEFT JOIN RepoSize s ON InnerPubRepo.repo_id = s.repo_id, RepoOwner, Branch \"\n        \"WHERE InnerPubRepo.repo_id=RepoOwner.repo_id AND owner_id=? \"\n        \"AND InnerPubRepo.repo_id = Branch.repo_id AND Branch.name = 'master'\";\n\n    if (seaf_db_statement_foreach_row (mgr->seaf->db, sql,\n                                       collect_public_repos, &ret,\n                                       1, \"string\", user) < 0) {\n        for (p = ret; p != NULL; p = p->next)\n            g_object_unref (p->data);\n        g_list_free (ret);\n        return NULL;\n    }\n\n    seaf_fill_repo_obj_from_commit (&ret);\n\n    return g_list_reverse (ret);\n}\n\nchar *\nseaf_repo_manager_get_inner_pub_repo_perm (SeafRepoManager *mgr,\n                                           const char *repo_id)\n{\n    char *sql;\n\n    sql = \"SELECT permission FROM InnerPubRepo WHERE repo_id=?\";\n    return seaf_db_statement_get_string(mgr->seaf->db, sql, 1, \"string\", repo_id);\n}\n\n\nint\nseaf_repo_manager_is_valid_filename (SeafRepoManager *mgr,\n                                     const char *repo_id,\n                                     const char *filename,\n                                     GError **error)\n{\n    if (should_ignore_file(filename, NULL))\n        return 0;\n    else\n        return 1;\n}\n\ntypedef struct _RepoCryptCompat {\n    const char *magic;\n    const char *pwd_hash;\n    const char *pwd_hash_algo;\n    const char *pwd_hash_params;\n} RepoCryptInfo;\n\nstatic\nRepoCryptInfo *\nrepo_crypt_info_new (const char *magic, const char *pwd_hash,\n                       const char *algo, const char *params)\n{\n    RepoCryptInfo *crypt_info = g_new0 (RepoCryptInfo, 1);\n    crypt_info->magic = magic;\n    crypt_info->pwd_hash = pwd_hash;\n    crypt_info->pwd_hash_algo = algo;\n    crypt_info->pwd_hash_params = params;\n\n    
return crypt_info;\n}\n\nstatic int\ncreate_repo_common (SeafRepoManager *mgr,\n                    const char *repo_id,\n                    const char *repo_name,\n                    const char *repo_desc,\n                    const char *user,\n                    const char *random_key,\n                    const char *salt,\n                    int enc_version,\n                    RepoCryptInfo *crypt_info,\n                    GError **error)\n{\n    SeafRepo *repo = NULL;\n    SeafCommit *commit = NULL;\n    SeafBranch *master = NULL;\n    int ret = -1;\n\n    if (enc_version != 4 && enc_version != 3 && enc_version != 2 && enc_version != -1) {\n        seaf_warning (\"Unsupported enc version %d.\\n\", enc_version);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Unsupported encryption version\");\n        return -1;\n    }\n    \n    if (crypt_info && crypt_info->pwd_hash_algo) {\n        if (g_strcmp0 (crypt_info->pwd_hash_algo, PWD_HASH_PDKDF2) != 0 &&\n            g_strcmp0 (crypt_info->pwd_hash_algo, PWD_HASH_ARGON2ID) !=0)\n        {\n            seaf_warning (\"Unsupported enc algothrims %s.\\n\", crypt_info->pwd_hash_algo);\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                         \"Unsupported encryption algothrims\");\n            return -1;\n        }\n\n        if (!crypt_info->pwd_hash || strlen(crypt_info->pwd_hash) != 64) {\n            seaf_warning (\"Bad pwd_hash.\\n\");\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                         \"Bad pwd_hash\");\n            return -1;\n        }\n    }\n\n    if (enc_version >= 2) {\n        if (!crypt_info->pwd_hash_algo && (!crypt_info->magic || strlen(crypt_info->magic) != 64)) {\n            seaf_warning (\"Bad magic.\\n\");\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                         \"Bad magic\");\n            return -1;\n        }\n        if (!random_key 
|| strlen(random_key) != 96) {\n            seaf_warning (\"Bad random key.\\n\");\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                         \"Bad random key\");\n            return -1;\n        }\n    }\n    if (enc_version >= 3) {\n        if (!salt || strlen(salt) != 64) {\n            seaf_warning (\"Bad salt.\\n\");\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                         \"Bad salt\");\n            return -1;\n        }\n    }\n\n    repo = seaf_repo_new (repo_id, repo_name, repo_desc);\n\n    repo->no_local_history = TRUE;\n\n    if (enc_version >= 2) {\n        repo->encrypted = TRUE;\n        repo->enc_version = enc_version;\n        if (!crypt_info->pwd_hash_algo)\n            memcpy (repo->magic, crypt_info->magic, 64);\n        memcpy (repo->random_key, random_key, 96);\n    }\n    if (enc_version >= 3)\n        memcpy (repo->salt, salt, 64);\n\n    if (crypt_info && crypt_info->pwd_hash_algo) {\n        // set pwd_hash fields here.\n        memcpy (repo->pwd_hash, crypt_info->pwd_hash, 64);\n        repo->pwd_hash_algo = g_strdup (crypt_info->pwd_hash_algo);\n        repo->pwd_hash_params = g_strdup (crypt_info->pwd_hash_params);\n    }\n\n    repo->version = CURRENT_REPO_VERSION;\n    memcpy (repo->store_id, repo_id, 36);\n\n    commit = seaf_commit_new (NULL, repo->id,\n                              EMPTY_SHA1, /* root id */\n                              user, /* creator */\n                              EMPTY_SHA1, /* creator id */\n                              \"Created library\",  /* description */\n                              0);         /* ctime */\n\n    seaf_repo_to_commit (repo, commit);\n    if (seaf_commit_manager_add_commit (seaf->commit_mgr, commit) < 0) {\n        seaf_warning (\"Failed to add commit.\\n\");\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to add commit\");\n        goto out;\n    }\n\n    master = 
seaf_branch_new (\"master\", repo->id, commit->commit_id);\n    if (seaf_branch_manager_add_branch (seaf->branch_mgr, master) < 0) {\n        seaf_warning (\"Failed to add branch.\\n\");\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to add branch\");\n        goto out;\n    }\n\n    if (seaf_repo_set_head (repo, master) < 0) {\n        seaf_warning (\"Failed to set repo head.\\n\");\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to set repo head.\");\n        goto out;\n    }\n\n    if (seaf_repo_manager_add_repo (mgr, repo) < 0) {\n        seaf_warning (\"Failed to add repo.\\n\");\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to add repo.\");\n        goto out;\n    }\n\n    seaf_repo_manager_update_repo_info (mgr, repo->id, repo->head->commit_id);\n\n    ret = 0;\nout:\n    if (repo)\n        seaf_repo_unref (repo);\n    if (commit)\n        seaf_commit_unref (commit);\n    if (master)\n        seaf_branch_unref (master);\n    \n    return ret;    \n}\n\nchar *\nseaf_repo_manager_create_new_repo (SeafRepoManager *mgr,\n                                   const char *repo_name,\n                                   const char *repo_desc,\n                                   const char *owner_email,\n                                   const char *passwd,\n                                   int enc_version,\n                                   const char *pwd_hash_algo,\n                                   const char *pwd_hash_params,\n                                   GError **error)\n{\n    char *repo_id = NULL;\n    char salt[65], magic[65], pwd_hash[65], random_key[97];\n    const char *algo = pwd_hash_algo;\n    const char *params = pwd_hash_params;\n\n    repo_id = gen_uuid ();\n\n    if (passwd && passwd[0] != 0) {\n        if (seafile_generate_repo_salt (salt) < 0) {\n            goto bad;\n        }\n        if (algo != 
NULL) {\n            seafile_generate_pwd_hash (enc_version, repo_id, passwd, salt, algo, params, pwd_hash);\n        } else {\n            seafile_generate_magic (enc_version, repo_id, passwd, salt, magic);\n        }\n        if (seafile_generate_random_key (passwd, enc_version, salt, random_key) < 0) {\n            goto bad;\n        }\n    }\n\n    int rc;\n    if (passwd) {\n        RepoCryptInfo *crypt_info = repo_crypt_info_new (magic, pwd_hash, algo, params);\n        rc = create_repo_common (mgr, repo_id, repo_name, repo_desc, owner_email,\n                                 random_key, salt, enc_version, crypt_info, error);\n        g_free (crypt_info);\n    }\n    else\n        rc = create_repo_common (mgr, repo_id, repo_name, repo_desc, owner_email,\n                                 NULL, NULL, -1, NULL, error);\n    if (rc < 0)\n        goto bad;\n\n    if (seaf_repo_manager_set_repo_owner (mgr, repo_id, owner_email) < 0) {\n        seaf_warning (\"Failed to set repo owner.\\n\");\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to set repo owner.\");\n        goto bad;\n    }\n\n    return repo_id;\n    \nbad:\n    if (repo_id)\n        g_free (repo_id);\n    return NULL;\n}\n\nchar *\nseaf_repo_manager_create_enc_repo (SeafRepoManager *mgr,\n                                   const char *repo_id,\n                                   const char *repo_name,\n                                   const char *repo_desc,\n                                   const char *owner_email,\n                                   const char *magic,\n                                   const char *random_key,\n                                   const char *salt,\n                                   int enc_version,\n                                   const char *pwd_hash,\n                                   const char *pwd_hash_algo,\n                                   const char *pwd_hash_params,\n                                   
GError **error)\n{\n    if (!repo_id || !is_uuid_valid (repo_id)) {\n        seaf_warning (\"Invalid repo_id.\\n\");\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid repo id\");\n        return NULL;\n    }\n\n    if (seaf_repo_manager_repo_exists (mgr, repo_id)) {\n        seaf_warning (\"Repo %s exists, refuse to create.\\n\", repo_id);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Repo already exists\");\n        return NULL;\n    }\n\n    RepoCryptInfo *crypt_info = repo_crypt_info_new (magic, pwd_hash, pwd_hash_algo, pwd_hash_params);\n    if (create_repo_common (mgr, repo_id, repo_name, repo_desc, owner_email,\n                            random_key, salt, enc_version, crypt_info, error) < 0) {\n        g_free (crypt_info);\n        return NULL;\n    }\n    g_free (crypt_info);\n\n    if (seaf_repo_manager_set_repo_owner (mgr, repo_id, owner_email) < 0) {\n        seaf_warning (\"Failed to set repo owner.\\n\");\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to set repo owner.\");\n        return NULL;\n    }\n\n    return g_strdup (repo_id);\n}\n\nstatic int reap_token (void *data)\n{\n    SeafRepoManager *mgr = data;\n    GHashTableIter iter;\n    gpointer key, value;\n    DecryptedToken *t;\n\n    pthread_rwlock_wrlock (&mgr->priv->lock);\n\n    gint64 now = (gint64)time(NULL);\n\n    g_hash_table_iter_init (&iter, mgr->priv->decrypted_tokens);\n    while (g_hash_table_iter_next (&iter, &key, &value)) {\n        t = value;\n        if (now >= t->reap_time)\n            g_hash_table_iter_remove (&iter);\n    }\n\n    pthread_rwlock_unlock (&mgr->priv->lock);\n\n    return TRUE;\n}\n\nstatic void decrypted_token_free (DecryptedToken *token)\n{\n    if (!token)\n        return;\n    g_free (token->token);\n    g_free (token);\n}\n\nvoid\nseaf_repo_manager_add_decrypted_token (SeafRepoManager *mgr,\n                            
           const char *encrypted_token,\n                                       const char *session_key,\n                                       const char *decrypted_token)\n{\n    char key[256];\n    DecryptedToken *token;\n\n    snprintf (key, sizeof(key), \"%s%s\", encrypted_token, session_key);\n    key[255] = 0;\n\n    pthread_rwlock_wrlock (&mgr->priv->lock);\n\n    token = g_new0 (DecryptedToken, 1);\n    token->token = g_strdup(decrypted_token);\n    token->reap_time = (gint64)time(NULL) + DECRYPTED_TOKEN_TTL;\n\n    g_hash_table_insert (mgr->priv->decrypted_tokens,\n                         g_strdup(key),\n                         token);\n\n    pthread_rwlock_unlock (&mgr->priv->lock);\n}\n\nchar *\nseaf_repo_manager_get_decrypted_token (SeafRepoManager *mgr,\n                                       const char *encrypted_token,\n                                       const char *session_key)\n{\n    char key[256];\n    DecryptedToken *token;\n\n    snprintf (key, sizeof(key), \"%s%s\", encrypted_token, session_key);\n    key[255] = 0;\n\n    pthread_rwlock_rdlock (&mgr->priv->lock);\n    token = g_hash_table_lookup (mgr->priv->decrypted_tokens, key);\n    pthread_rwlock_unlock (&mgr->priv->lock);\n\n    if (token)\n        return g_strdup(token->token);\n    return NULL;\n}\n\nstatic gboolean\nget_shared_users (SeafDBRow *row, void *data)\n{\n    GList **shared_users = data;\n    const char *user = seaf_db_row_get_column_text (row, 0);\n    const char *perm = seaf_db_row_get_column_text (row, 1);\n    const char *repo_id = seaf_db_row_get_column_text (row, 2);\n\n    SeafileSharedUser *uobj = g_object_new (SEAFILE_TYPE_SHARED_USER,\n                                            \"repo_id\", repo_id,\n                                            \"user\", user,\n                                            \"perm\", perm,\n                                            NULL);\n    *shared_users = g_list_prepend (*shared_users, uobj);\n\n    return 
TRUE;\n}\n\nGList *\nseaf_repo_manager_get_shared_users_for_subdir (SeafRepoManager *mgr,\n                                               const char *repo_id,\n                                               const char *path,\n                                               const char *from_user,\n                                               GError **error)\n{\n    GList *shared_users = NULL;\n    int ret = seaf_db_statement_foreach_row (mgr->seaf->db,\n                                             \"SELECT to_email, permission, v.repo_id \"\n                                             \"FROM SharedRepo s, VirtualRepo v \"\n                                             \"WHERE s.repo_id = v.repo_id AND v.origin_repo = ? \"\n                                             \"AND v.path = ? AND s.from_email = ?\",\n                                             get_shared_users, &shared_users, 3, \"string\", repo_id,\n                                             \"string\", path, \"string\", from_user);\n    if (ret < 0) {\n        seaf_warning (\"Failed to get shared users for %.8s(%s).\\n\", repo_id, path);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to get shared users for subdir from db\");\n        while (shared_users) {\n            g_object_unref (shared_users->data);\n            shared_users = g_list_delete_link (shared_users, shared_users);\n        }\n        return NULL;\n    }\n\n    return shared_users;\n}\n\nstatic gboolean\nget_shared_groups (SeafDBRow *row, void *data)\n{\n    GList **shared_groups = data;\n    int group = seaf_db_row_get_column_int (row, 0);\n    const char *perm = seaf_db_row_get_column_text (row, 1);\n    const char *repo_id = seaf_db_row_get_column_text (row, 2);\n\n    SeafileSharedGroup *gobj = g_object_new (SEAFILE_TYPE_SHARED_GROUP,\n                                             \"repo_id\", repo_id,\n                                             \"group_id\", group,\n                  
                           \"perm\", perm,\n                                             NULL);\n\n    *shared_groups = g_list_prepend (*shared_groups, gobj);\n\n    return TRUE;\n}\n\nGList *\nseaf_repo_manager_get_shared_groups_for_subdir (SeafRepoManager *mgr,\n                                                const char *repo_id,\n                                                const char *path,\n                                                const char *from_user,\n                                                GError **error)\n{\n    GList *shared_groups = NULL;\n    int ret = seaf_db_statement_foreach_row (mgr->seaf->db,\n                                             \"SELECT group_id, permission, v.repo_id \"\n                                             \"FROM RepoGroup r, VirtualRepo v \"\n                                             \"WHERE r.repo_id = v.repo_id AND v.origin_repo = ? \"\n                                             \"AND v.path = ? AND r.user_name = ?\",\n                                             get_shared_groups, &shared_groups, 3, \"string\", repo_id,\n                                             \"string\", path, \"string\", from_user);\n    if (ret < 0) {\n        seaf_warning (\"Failed to get shared groups for %.8s(%s).\\n\", repo_id, path);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to get shared groups fro subdir from db\");\n        while (shared_groups) {\n            g_object_unref (shared_groups->data);\n            shared_groups = g_list_delete_link (shared_groups, shared_groups);\n        }\n        return NULL;\n    }\n\n    return shared_groups;\n}\nint\nseaf_repo_manager_edit_repo (const char *repo_id,\n                             const char *name,\n                             const char *description,\n                             const char *user,\n                             GError **error)\n{\n    SeafRepo *repo = NULL;\n    SeafCommit *commit = NULL, *parent = 
NULL;\n    int ret = 0;\n\n    if (!name && !description) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"At least one argument should be non-null\");\n        return -1;\n    }\n\n    if (!is_uuid_valid (repo_id)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo id\");\n        return -1;\n    }\n\nretry:\n    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);\n    if (!repo) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"No such library\");\n        return -1;\n    }\n    if (!name)\n        name = repo->name;\n    if (!description)\n        description = repo->desc;\n\n    /*\n     * We only change repo_name or repo_desc, so just copy the head commit\n     * and change these two fields.\n     */\n    parent = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                                             repo->id, repo->version,\n                                             repo->head->commit_id);\n    if (!parent) {\n        seaf_warning (\"Failed to get commit %s:%s.\\n\",\n                      repo->id, repo->head->commit_id);\n        ret = -1;\n        goto out;\n    }\n    if (!user) {\n        user = parent->creator_name;\n    }\n\n    commit = seaf_commit_new (NULL,\n                              repo->id,\n                              parent->root_id,\n                              user,\n                              EMPTY_SHA1,\n                              \"Changed library name or description\",\n                              0);\n    commit->parent_id = g_strdup(parent->commit_id);\n    seaf_repo_to_commit (repo, commit);\n\n    g_free (commit->repo_name);\n    commit->repo_name = g_strdup(name);\n    g_free (commit->repo_desc);\n    commit->repo_desc = g_strdup(description);\n\n    if (seaf_commit_manager_add_commit (seaf->commit_mgr, commit) < 0) {\n        ret = -1;\n        goto out;\n    }\n\n    seaf_branch_set_commit (repo->head, 
commit->commit_id);\n    if (seaf_branch_manager_test_and_update_branch (seaf->branch_mgr,\n                                                    repo->head,\n                                                    parent->commit_id,\n                                                    FALSE, NULL, NULL, NULL) < 0) {\n        seaf_repo_unref (repo);\n        seaf_commit_unref (commit);\n        seaf_commit_unref (parent);\n        repo = NULL;\n        commit = NULL;\n        parent = NULL;\n        goto retry;\n    }\n\n    seaf_repo_manager_update_repo_info (seaf->repo_mgr, repo_id, repo->head->commit_id);\n\nout:\n    seaf_commit_unref (commit);\n    seaf_commit_unref (parent);\n    seaf_repo_unref (repo);\n\n    return ret;\n}\n\ngboolean\nget_total_file_number_cb (SeafDBRow *row, void *vdata)\n{\n    gint64 *data = (gint64 *)vdata;\n    gint64 count = seaf_db_row_get_column_int64 (row, 0);\n    *data = count;\n\n    return FALSE;\n}\n\ngint64\nseaf_get_total_file_number (GError **error)\n{\n    gint64 count = 0;\n    int ret = seaf_db_statement_foreach_row (seaf->db,\n                                             \"SELECT SUM(file_count) FROM RepoFileCount f \"\n                                             \"LEFT JOIN VirtualRepo v \"\n                                             \"ON f.repo_id=v.repo_id,\"\n                                             \"Repo r \"\n                                             \"WHERE v.repo_id IS NULL AND \"\n                                             \"f.repo_id=r.repo_id\",\n                                             get_total_file_number_cb,\n                                             &count, 0);\n    if (ret < 0) { \n        seaf_warning (\"Failed to get total file number.\\n\");\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to get total file number from db.\");\n        return -1;\n    }\n\n    return count;\n}\n\ngboolean\nget_total_storage_cb(SeafDBRow *row, void 
*vdata)\n{\n    gint64 *data = (gint64 *)vdata;\n    gint64 size = seaf_db_row_get_column_int64 (row, 0);\n    *data = size;\n\n    return FALSE;\n}\n\ngint64\nseaf_get_total_storage (GError **error)\n{\n    gint64 size = 0;\n    int ret;\n    if (seaf_db_type(seaf->db) == SEAF_DB_TYPE_PGSQL) {\n        ret = seaf_db_statement_foreach_row (seaf->db,\n                                             \"SELECT SUM(\\\"size\\\") FROM RepoSize s \"\n                                             \"LEFT JOIN VirtualRepo v \"\n                                             \"ON s.repo_id=v.repo_id \"\n                                             \"WHERE v.repo_id IS NULL\",\n                                             get_total_storage_cb,\n                                             &size, 0);\n    } else {\n        ret = seaf_db_statement_foreach_row (seaf->db,\n                                             \"SELECT SUM(size) FROM RepoSize s \"\n                                             \"LEFT JOIN VirtualRepo v \"\n                                             \"ON s.repo_id=v.repo_id \"\n                                             \"WHERE v.repo_id IS NULL\",\n                                             get_total_storage_cb,\n                                             &size, 0);\n    }\n    if (ret < 0) {\n        seaf_warning (\"Failed to get total storage occupation.\\n\");\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to get total storage occupation from db.\");\n        return -1;\n    }\n\n    return size;\n}\n\n/* Online GC related */\n\nchar *\nseaf_repo_get_current_gc_id (SeafRepo *repo)\n{\n    if (seaf_db_type(seaf->db) == SEAF_DB_TYPE_SQLITE)\n        return NULL;\n\n    char *sql = \"SELECT gc_id FROM GCID WHERE repo_id = ?\";\n    char *gc_id;\n\n    if (!repo->virtual_info)\n        gc_id = seaf_db_statement_get_string (seaf->db, sql, 1, \"string\", repo->id);\n    else {\n        gc_id = 
seaf_db_statement_get_string (seaf->db, sql, 1, \"string\", repo->store_id);\n    }\n\n    return gc_id;\n}\n\nchar *\nseaf_repo_get_last_gc_id (SeafRepo *repo, const char *client_id)\n{\n    if (seaf_db_type(seaf->db) == SEAF_DB_TYPE_SQLITE)\n        return NULL;\n\n    char *sql = \"SELECT gc_id FROM LastGCID WHERE repo_id = ? AND client_id = ?\";\n    char *gc_id;\n\n    gc_id = seaf_db_statement_get_string (seaf->db, sql,\n                                          2, \"string\", repo->id,\n                                          \"string\", client_id);\n\n    return gc_id;\n}\n\ngboolean\nseaf_repo_has_last_gc_id (SeafRepo *repo, const char *client_id)\n{\n    if (seaf_db_type(seaf->db) == SEAF_DB_TYPE_SQLITE)\n        return FALSE;\n\n    char *sql = \"SELECT 1 FROM LastGCID WHERE repo_id = ? AND client_id = ?\";\n    gboolean db_err;\n\n    return seaf_db_statement_exists (seaf->db, sql, &db_err,\n                                     2, \"string\", repo->id, \"string\", client_id);\n}\n\nint\nseaf_repo_set_last_gc_id (SeafRepo *repo,\n                          const char *client_id,\n                          const char *gc_id)\n{\n    if (seaf_db_type(seaf->db) == SEAF_DB_TYPE_SQLITE)\n        return 0;\n\n    gboolean id_exists, db_err = FALSE;\n    char *sql;\n    int ret = 0;\n\n    sql = \"SELECT 1 FROM LastGCID WHERE repo_id = ? AND client_id = ?\";\n    id_exists = seaf_db_statement_exists (seaf->db, sql, &db_err,\n                                          2, \"string\", repo->id, \"string\", client_id);\n    if (id_exists) {\n        sql = \"UPDATE LastGCID SET gc_id = ? WHERE repo_id = ? 
AND client_id = ?\";\n        ret = seaf_db_statement_query (seaf->db, sql,\n                                       3, \"string\", gc_id,\n                                       \"string\", repo->id, \"string\", client_id);\n    } else {\n        sql = \"INSERT INTO LastGCID (repo_id, client_id, gc_id) VALUES (?, ?, ?)\";\n        ret = seaf_db_statement_query (seaf->db, sql,\n                                       3, \"string\", repo->id,\n                                       \"string\", client_id, \"string\", gc_id);\n    }\n\n    return ret;\n}\n\nint\nseaf_repo_remove_last_gc_id (SeafRepo *repo,\n                             const char *client_id)\n{\n    if (seaf_db_type(seaf->db) == SEAF_DB_TYPE_SQLITE)\n        return 0;\n\n    char *sql = \"DELETE FROM LastGCID WHERE repo_id = ? AND client_id = ?\";\n    seaf_db_statement_query (seaf->db, sql, 2, \"string\", repo->id, \"string\", client_id);\n    return 0;\n}\n\nint\nseaf_repo_manager_add_upload_tmp_file (SeafRepoManager *mgr,\n                                       const char *repo_id,\n                                       const char *file_path,\n                                       const char *tmp_file,\n                                       GError **error)\n{\n    char *file_path_with_slash = NULL;\n\n    if (file_path[0] == '/') {\n        file_path_with_slash = g_strdup(file_path);\n    } else {\n        file_path_with_slash = g_strconcat(\"/\", file_path, NULL);\n    }\n\n    int ret = seaf_db_statement_query (mgr->seaf->db,\n                                       \"INSERT INTO WebUploadTempFiles \"\n                                       \"(repo_id, file_path, tmp_file_path) \"\n                                       \"VALUES (?, ?, ?)\", 3, \"string\", repo_id,\n                                       \"string\", file_path_with_slash, \"string\", tmp_file);\n\n    if (ret < 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to add upload tmp file 
record to db.\");\n    }\n\n    g_free (file_path_with_slash);\n    return ret;\n}\n\nint\nseaf_repo_manager_del_upload_tmp_file (SeafRepoManager *mgr,\n                                       const char *repo_id,\n                                       const char *file_path,\n                                       GError **error)\n{\n    char *file_path_with_slash = NULL, *file_path_no_slash = NULL;\n\n    /* Due to a bug in early versions of 7.0, some file_path may be stored in the db without\n     * a leading slash. To be compatible with those records, we need to check the path\n     * with and without leading slash.\n     */\n    if (file_path[0] == '/') {\n        file_path_with_slash = g_strdup(file_path);\n        file_path_no_slash = g_strdup(file_path+1);\n    } else {\n        file_path_with_slash = g_strconcat(\"/\", file_path, NULL);\n        file_path_no_slash = g_strdup(file_path);\n    }\n\n    int ret = seaf_db_statement_query (mgr->seaf->db,\n                                       \"DELETE FROM WebUploadTempFiles WHERE \"\n                                       \"repo_id = ? 
AND file_path IN (?, ?)\",\n                                       3, \"string\", repo_id,\n                                       \"string\", file_path_with_slash,\n                                       \"string\", file_path_no_slash);\n    if (ret < 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to delete upload tmp file record from db.\");\n    }\n\n    g_free (file_path_with_slash);\n    g_free (file_path_no_slash);\n    return ret;\n}\n\nstatic gboolean\nget_tmp_file_path (SeafDBRow *row, void *data)\n{\n    char **path = data;\n\n    *path = g_strdup (seaf_db_row_get_column_text (row, 0));\n\n    return FALSE;\n}\n\nchar *\nseaf_repo_manager_get_upload_tmp_file (SeafRepoManager *mgr,\n                                       const char *repo_id,\n                                       const char *file_path,\n                                       GError **error)\n{\n    char *tmp_file_path = NULL;\n    char *file_path_with_slash = NULL, *file_path_no_slash = NULL;\n\n    /* Due to a bug in early versions of 7.0, some file_path may be stored in the db without\n     * a leading slash. To be compatible with those records, we need to check the path\n     * with and without leading slash.\n     * The correct file_path in db should be with a leading slash.\n     */\n    if (file_path[0] == '/') {\n        file_path_with_slash = g_strdup(file_path);\n        file_path_no_slash = g_strdup(file_path+1);\n    } else {\n        file_path_with_slash = g_strconcat(\"/\", file_path, NULL);\n        file_path_no_slash = g_strdup(file_path);\n    }\n\n    int ret = seaf_db_statement_foreach_row (mgr->seaf->db,\n                                             \"SELECT tmp_file_path FROM WebUploadTempFiles \"\n                                             \"WHERE repo_id = ? 
AND file_path = ?\",\n                                             get_tmp_file_path, &tmp_file_path,\n                                             2, \"string\", repo_id,\n                                             \"string\", file_path_with_slash);\n    if (ret < 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to get upload temp file path from db.\");\n        goto out;\n    }\n\n    if (!tmp_file_path) {\n        /* Try file_path without slash. */\n        int ret = seaf_db_statement_foreach_row (mgr->seaf->db,\n                                                 \"SELECT tmp_file_path FROM WebUploadTempFiles \"\n                                                 \"WHERE repo_id = ? AND file_path = ?\",\n                                                 get_tmp_file_path, &tmp_file_path,\n                                                 2, \"string\", repo_id,\n                                                 \"string\", file_path_no_slash);\n        if (ret < 0) {\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                         \"Failed to get upload temp file path from db.\");\n            goto out;\n        }\n    }\n\nout:\n    g_free (file_path_with_slash);\n    g_free (file_path_no_slash);\n    return tmp_file_path;\n}\n\ngint64\nseaf_repo_manager_get_upload_tmp_file_offset (SeafRepoManager *mgr,\n                                              const char *repo_id,\n                                              const char *file_path,\n                                              GError **error)\n{\n    char *tmp_file_path = NULL;\n    SeafStat file_stat;\n\n    tmp_file_path = seaf_repo_manager_get_upload_tmp_file (mgr, repo_id,\n                                                           file_path, error);\n    if (*error) {\n        return -1;\n    }\n\n    if (!tmp_file_path)\n        return 0;\n\n    if (seaf_stat (tmp_file_path, &file_stat) < 0) {\n        if (errno == ENOENT) 
{\n            seaf_message (\"Temp file %s doesn't exist, remove record from db.\\n\",\n                          tmp_file_path);\n            if (seaf_repo_manager_del_upload_tmp_file (mgr, repo_id,\n                                                       file_path, error) < 0) {\n                g_free (tmp_file_path);\n                return -1;\n            }\n            return 0;\n        }\n        seaf_warning (\"Failed to stat temp file %s: %s.\\n\",\n                      tmp_file_path, strerror(errno));\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to stat temp file.\");\n        g_free (tmp_file_path);\n        return -1;\n    }\n\n    g_free (tmp_file_path);\n\n    return file_stat.st_size;\n}\n\nvoid\nseaf_repo_manager_update_repo_info (SeafRepoManager *mgr,\n                                    const char *repo_id, const char *head_commit_id)\n{\n    SeafCommit *head;\n\n    head = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                                           repo_id, 1, head_commit_id);\n    if (!head) {\n        seaf_warning (\"Failed to get commit %s:%s.\\n\", repo_id, head_commit_id);\n        return;\n    }\n\n    set_repo_commit_to_db (repo_id, head->repo_name, head->ctime, head->version,\n                           (head->encrypted ? 
1 : 0), head->creator_name);\n\n    seaf_commit_unref (head);\n}\n\nchar *\nseaf_get_trash_repo_owner (const char *repo_id)\n{\n    char *sql = \"SELECT owner_id from RepoTrash WHERE repo_id = ?\";\n    return seaf_db_statement_get_string(seaf->db, sql, 1, \"string\", repo_id);\n}\n\nGObject *\nseaf_get_group_shared_repo_by_path (SeafRepoManager *mgr,\n                                    const char *repo_id,\n                                    const char *path,\n                                    int group_id,\n                                    gboolean is_org,\n                                    GError **error)\n{\n    char *sql;\n    char *real_repo_id = NULL;\n    GList *repo = NULL;\n    GObject *ret = NULL;\n\n    /* If path is NULL, 'repo_id' represents for the repo we want,\n     * otherwise, 'repo_id' represents for the origin repo,\n     * find virtual repo by path first.\n     */\n    if (path != NULL) {\n        real_repo_id = seaf_repo_manager_get_virtual_repo_id (mgr, repo_id, path, NULL);\n        if (!real_repo_id) {\n            seaf_warning (\"Failed to get virtual repo_id by path %s, origin_repo: %s\\n\", path, repo_id);\n            return NULL;\n        }\n    }\n    if (!real_repo_id)\n        real_repo_id = g_strdup (repo_id);\n\n    if (!is_org)\n        sql = \"SELECT RepoGroup.repo_id, v.repo_id, \"\n              \"group_id, user_name, permission, commit_id, s.size, \"\n              \"v.origin_repo, v.path, i.name, \"\n              \"i.update_time, i.version, i.is_encrypted, i.last_modifier, i.status, i.type, i2.name \"\n              \"FROM RepoGroup LEFT JOIN VirtualRepo v ON \"\n              \"RepoGroup.repo_id = v.repo_id \"\n              \"LEFT JOIN RepoInfo i ON RepoGroup.repo_id = i.repo_id \"\n              \"LEFT JOIN RepoInfo i2 ON v.origin_repo = i2.repo_id \"\n              \"LEFT JOIN RepoSize s ON RepoGroup.repo_id = s.repo_id, \"\n              \"Branch WHERE group_id = ? 
AND \"\n              \"RepoGroup.repo_id = Branch.repo_id AND \"\n              \"RepoGroup.repo_id = ? AND \"\n              \"Branch.name = 'master'\";\n    else\n        sql = \"SELECT OrgGroupRepo.repo_id, v.repo_id, \"\n              \"group_id, owner, permission, commit_id, s.size, \"\n              \"v.origin_repo, v.path, i.name, \"\n              \"i.update_time, i.version, i.is_encrypted, i.last_modifier, i.status, i.type, i2.name \"\n              \"FROM OrgGroupRepo LEFT JOIN VirtualRepo v ON \"\n              \"OrgGroupRepo.repo_id = v.repo_id \"\n              \"LEFT JOIN RepoInfo i ON OrgRepoGroup.repo_id = i.repo_id \"\n              \"LEFT JOIN RepoInfo i2 ON v.origin_repo = i2.repo_id \"\n              \"LEFT JOIN RepoSize s ON OrgGroupRepo.repo_id = s.repo_id, \"\n              \"Branch WHERE group_id = ? AND \"\n              \"OrgGroupRepo.repo_id = Branch.repo_id AND \"\n              \"OrgGroupRepo.repo_id = ? AND \"\n              \"Branch.name = 'master'\";\n\n    /* The list 'repo' should have only one repo,\n     * use existing api get_group_repos_cb() to get it.\n     */\n    if (seaf_db_statement_foreach_row (mgr->seaf->db, sql, get_group_repos_cb,\n                                       &repo, 2, \"int\", group_id,\n                                       \"string\", real_repo_id) < 0) {\n        g_free (real_repo_id);\n        g_list_free (repo);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to get repo by group_id from db.\");\n        return NULL;\n    }\n    g_free (real_repo_id);\n\n    if (repo) {\n        seaf_fill_repo_obj_from_commit (&repo);\n        if (repo)\n            ret = (GObject *)(repo->data);\n        g_list_free (repo);\n    }\n\n    return ret;\n}\n\nGList *\nseaf_get_group_repos_by_user (SeafRepoManager *mgr,\n                              const char *user,\n                              int org_id,\n                              GError **error)\n{\n    CcnetGroup 
*group;\n    GList *groups = NULL, *p, *q;\n    GList *repos = NULL;\n    SeafileRepo *repo = NULL;\n    GString *sql = NULL;\n    int group_id = 0;\n\n    /* Get the groups this user belongs to. */\n    groups = ccnet_group_manager_get_groups_by_user (seaf->group_mgr, user,\n                                                     1, NULL);\n    if (!groups) {\n        goto out;\n    }\n\n    sql = g_string_new (\"\");\n    g_string_printf (sql, \"SELECT g.repo_id, v.repo_id, \"\n                          \"group_id, %s, permission, commit_id, s.size, \"\n                          \"v.origin_repo, v.path, i.name, \"\n                          \"i.update_time, i.version, i.is_encrypted, i.last_modifier, i.status, i.type, i2.name \"\n                          \"FROM %s g LEFT JOIN VirtualRepo v ON \"\n                          \"g.repo_id = v.repo_id \"\n                          \"LEFT JOIN RepoInfo i ON g.repo_id = i.repo_id \"\n                          \"LEFT JOIN RepoInfo i2 ON v.origin_repo = i2.repo_id \"\n                          \"LEFT JOIN RepoSize s ON g.repo_id = s.repo_id, \"\n                          \"Branch b WHERE g.repo_id = b.repo_id AND \"\n                          \"b.name = 'master' AND group_id IN (\",\n                          org_id < 0 ? \"user_name\" : \"owner\",\n                          org_id < 0 ? 
\"RepoGroup\" : \"OrgGroupRepo\");\n    for (p = groups; p != NULL; p = p->next) {\n        group = p->data;\n        g_object_get (group, \"id\", &group_id, NULL);\n\n        g_string_append_printf (sql, \"%d\", group_id);\n        if (p->next)\n            g_string_append_printf (sql, \",\");\n    }\n    g_string_append_printf (sql, \" ) ORDER BY group_id\");\n\n    if (seaf_db_statement_foreach_row (mgr->seaf->db, sql->str, get_group_repos_cb,\n                                       &repos, 0) < 0) {\n        for (p = repos; p; p = p->next) {\n            g_object_unref (p->data);\n        }\n        g_list_free (repos);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to get user group repos from db.\");\n        seaf_warning (\"Failed to get user[%s] group repos from db.\\n\", user);\n        goto out;\n    }\n\n    int repo_group_id = 0;\n    char *group_name = NULL;\n    q = repos;\n\n    /* Add group_name to repo. Both groups and repos are listed by group_id in descending order */\n    for (p = groups; p; p = p->next) {\n        group = p->data;\n        g_object_get (group, \"id\", &group_id, NULL);\n        g_object_get (group, \"group_name\", &group_name, NULL);\n\n        for (; q; q = q->next) {\n            repo = q->data;\n            g_object_get (repo, \"group_id\", &repo_group_id, NULL);\n            if (repo_group_id == group_id)\n                g_object_set (repo, \"group_name\", group_name, NULL);\n            else\n                break;\n        }\n        g_free (group_name);\n        if (q == NULL)\n            break;\n    }\n\n    seaf_fill_repo_obj_from_commit (&repos);\n\nout:\n    if (sql)\n        g_string_free (sql, TRUE);\n\n    for (p = groups; p != NULL; p = p->next)\n        g_object_unref ((GObject *)p->data);\n    g_list_free (groups);\n\n    return g_list_reverse (repos);\n}\n\ntypedef struct RepoPath {\n    char *repo_id;\n    char *path;\n    int group_id;\n} 
RepoPath;\n\n\ngboolean\nconvert_repo_path_cb (SeafDBRow *row, void *data)\n{\n    GList **repo_paths = data;\n\n    const char *repo_id = seaf_db_row_get_column_text (row, 0);\n    const char *path = seaf_db_row_get_column_text (row, 1);\n    int group_id = seaf_db_row_get_column_int (row, 2);\n\n    RepoPath *rp = g_new0(RepoPath, 1);\n    rp->repo_id = g_strdup(repo_id);\n    rp->path = g_strdup(path);\n    rp->group_id = group_id;\n    *repo_paths = g_list_append (*repo_paths, rp);\n\n    return TRUE;\n}\n\nstatic void\nfree_repo_path (gpointer data)\n{\n    if (!data)\n        return;\n\n    RepoPath *rp = data;\n    g_free (rp->repo_id);\n    g_free (rp->path);\n    g_free (rp);\n}\n\nstatic char *\nfilter_path (GList *repo_paths, const char *path)\n{\n    GList *ptr = NULL;\n    int len;\n    const char *relative_path;\n    char *ret = NULL;\n    RepoPath *rp = NULL, res;\n    res.repo_id = NULL;\n    res.path = NULL;\n    res.group_id = 0;\n\n    /* Find nearest item which contains @path, */\n    for (ptr = repo_paths; ptr; ptr = ptr->next) {\n        rp = ptr->data;\n        len = strlen(rp->path);\n        if (strncmp(rp->path, path, len) == 0 && (path[len] == '/' || path[len] == '\\0')) {\n\n            if (g_strcmp0(rp->path, res.path) > 0) {\n                res.path = rp->path;\n                res.repo_id = rp->repo_id;\n                res.group_id = rp->group_id;\n            }\n        }\n    }\n    if (res.repo_id && res.path) {\n        relative_path = path + strlen(res.path);\n        if (relative_path[0] == '\\0')\n            relative_path = \"/\";\n\n        json_t *json = json_object ();\n        json_object_set_string_member(json, \"repo_id\", res.repo_id);\n        json_object_set_string_member(json, \"path\", relative_path);\n        if (res.group_id > 0)\n            json_object_set_int_member(json, \"group_id\", res.group_id);\n        ret = json_dumps (json, 0);\n        json_decref (json);\n    }\n\n    return ret;\n}\n\n/* Convert 
origin repo and path to virtual repo and relative path */\nchar *\nseaf_repo_manager_convert_repo_path (SeafRepoManager *mgr,\n                                     const char *repo_id,\n                                     const char *path,\n                                     const char *user,\n                                     gboolean is_org,\n                                     GError **error)\n{\n    char *ret = NULL;\n    int rc;\n    int group_id;\n    GString *sql;\n    CcnetGroup *group;\n    GList *groups = NULL, *p1;\n    GList *repo_paths = NULL;\n    SeafVirtRepo *vinfo = NULL;\n    const char *r_repo_id = repo_id;\n    char *r_path = NULL;\n\n    vinfo = seaf_repo_manager_get_virtual_repo_info (mgr, repo_id);\n    if (vinfo) {\n        r_repo_id = vinfo->origin_repo_id;\n        r_path = g_strconcat (vinfo->path, path, NULL);\n    } else {\n        r_path = g_strdup(path);\n    }\n\n    sql = g_string_new (\"\");\n    g_string_printf (sql, \"SELECT v.repo_id, path, 0 FROM VirtualRepo v, %s s WHERE \"\n                     \"v.origin_repo=? AND v.repo_id=s.repo_id AND s.to_email=?\",\n                     is_org ? \"OrgSharedRepo\" : \"SharedRepo\");\n    rc = seaf_db_statement_foreach_row (seaf->db,\n                                        sql->str, convert_repo_path_cb,\n                                        &repo_paths, 2,\n                                        \"string\", r_repo_id, \"string\", user);\n    if (rc < 0) {\n        seaf_warning(\"Failed to convert repo path [%s:%s] to virtual repo path, db_error.\\n\",\n                     repo_id, path);\n        goto out;\n    }\n    ret = filter_path(repo_paths, r_path);\n    g_list_free_full(repo_paths, free_repo_path);\n    repo_paths = NULL;\n    if (ret)\n        goto out;\n\n    /* Get the groups this user belongs to. 
*/\n\n    groups = ccnet_group_manager_get_groups_by_user (seaf->group_mgr, user,\n                                                     1, NULL);\n    if (!groups) {\n        goto out;\n    }\n\n    g_string_printf (sql, \"SELECT v.repo_id, path, r.group_id FROM VirtualRepo v, %s r WHERE \"\n                     \"v.origin_repo=? AND v.repo_id=r.repo_id AND r.group_id IN(\",\n                     is_org ? \"OrgGroupRepo\" : \"RepoGroup\");\n    for (p1 = groups; p1 != NULL; p1 = p1->next) {\n        group = p1->data;\n        g_object_get (group, \"id\", &group_id, NULL);\n\n        g_string_append_printf (sql, \"%d\", group_id);\n        if (p1->next)\n            g_string_append_printf (sql, \",\");\n    }\n    g_string_append_printf (sql, \")\");\n\n    rc = seaf_db_statement_foreach_row (seaf->db,\n                                        sql->str, convert_repo_path_cb,\n                                        &repo_paths, 1,\n                                        \"string\", r_repo_id);\n    if (rc < 0) {\n        seaf_warning(\"Failed to convert repo path [%s:%s] to virtual repo path, db error.\\n\",\n                     repo_id, path);\n        g_string_free (sql, TRUE);\n        goto out;\n    }\n    ret = filter_path(repo_paths, r_path);\n    g_list_free_full(repo_paths, free_repo_path);\n\nout:\n    g_free (r_path);\n    if (vinfo)\n        seaf_virtual_repo_info_free (vinfo);\n    g_string_free (sql, TRUE);\n    for (p1 = groups; p1 != NULL; p1 = p1->next)\n        g_object_unref ((GObject *)p1->data);\n    g_list_free (groups);\n\n    return ret;\n}\n\nint\nseaf_repo_manager_set_repo_status(SeafRepoManager *mgr,\n                                  const char *repo_id, RepoStatus status)\n{\n    int ret = 0;\n\n    if (seaf_db_statement_query (mgr->seaf->db,\n                                 \"UPDATE RepoInfo SET status=? \"\n                                 \"WHERE repo_id=? 
OR repo_id IN \"\n                                 \"(SELECT repo_id FROM VirtualRepo WHERE origin_repo=?)\",\n                                 3, \"int\", status,\n                                 \"string\", repo_id, \"string\", repo_id) < 0)\n        ret = -1;\n\n    return ret;\n}\n\nint\nseaf_repo_manager_get_repo_status(SeafRepoManager *mgr,\n                                  const char *repo_id)\n{\n    // First, check origin repo's status\n    char *sql = \"SELECT i.status FROM VirtualRepo v LEFT JOIN RepoInfo i \"\n          \"ON i.repo_id=v.origin_repo WHERE v.repo_id=? \"\n          \"AND i.repo_id IS NOT NULL\";\n    int status = seaf_db_statement_get_int (mgr->seaf->db, sql,\n                                        1, \"string\", repo_id);\n    if (status >= 0) {\n        return status;\n    }\n\n    // Then check repo's own status\n    sql = \"SELECT status FROM RepoInfo WHERE repo_id=?\";\n    status = seaf_db_statement_get_int (mgr->seaf->db, sql,\n                                            1, \"string\", repo_id);\n    return status;\n}\n"
  },
  {
    "path": "server/repo-mgr.h",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#ifndef SEAF_REPO_MGR_H\n#define SEAF_REPO_MGR_H\n\n#include \"seafile-object.h\"\n#include \"commit-mgr.h\"\n#include \"branch-mgr.h\"\n\ntypedef enum RepoStatus {\n    REPO_STATUS_NORMAL,\n    REPO_STATUS_READ_ONLY,\n    N_REPO_STATUS,\n} RepoStatus;\n\nstruct _SeafRepoManager;\ntypedef struct _SeafRepo SeafRepo;\n\ntypedef struct SeafVirtRepo {\n    char        repo_id[37];\n    char        origin_repo_id[37];\n    char        *path;\n    char        base_commit[41];\n} SeafVirtRepo;\n\nstruct _SeafRepo {\n    struct _SeafRepoManager *manager;\n\n    gchar       id[37];\n    gchar      *name;\n    gchar      *desc;\n    gchar      *last_modifier;\n    gboolean    encrypted;\n    int         enc_version;\n    gchar       magic[65];       /* hash(repo_id + passwd), key stretched. */\n    gchar       pwd_hash[65];       /* hash(repo_id + passwd), key stretched. */\n    gchar       *pwd_hash_algo;\n    gchar       *pwd_hash_params;\n    gchar       random_key[97];\n    gchar       salt[65];\n    gboolean    no_local_history;\n    gint64      last_modify;\n    gint64      size;\n    gint64      file_count;\n    gchar       *type;\n\n    int         status;\n\n    SeafBranch *head;\n    gchar root_id[41];\n\n    gboolean    is_corrupted;\n    gboolean    repaired;\n    int         ref_cnt;\n\n    SeafVirtRepo *virtual_info;\n\n    int version;\n    /* Used to access fs and block sotre.\n     * This id is different from repo_id when this repo is virtual.\n     * Virtual repos share fs and block store with its origin repo.\n     * However, commit store for each repo is always independent.\n     * So always use repo_id to access commit store.\n     */\n    gchar       store_id[37];\n};\n\ngboolean is_repo_id_valid (const char *id);\n\nSeafRepo* \nseaf_repo_new (const char *id, const char *name, const char *desc);\n\nvoid\nseaf_repo_free (SeafRepo 
*repo);\n\nvoid\nseaf_repo_ref (SeafRepo *repo);\n\nvoid\nseaf_repo_unref (SeafRepo *repo);\n\nint\nseaf_repo_set_head (SeafRepo *repo, SeafBranch *branch);\n\n/* Update repo name, desc, magic etc from commit.\n */\nvoid\nseaf_repo_from_commit (SeafRepo *repo, SeafCommit *commit);\n\nvoid\nseaf_fill_repo_obj_from_commit (GList **repos);\n\n/* Update repo-related fields to commit. \n */\nvoid\nseaf_repo_to_commit (SeafRepo *repo, SeafCommit *commit);\n\n/*\n * Returns a list of all commits belongs to the repo.\n * The commits in the repos are all unique.\n */\nGList *\nseaf_repo_get_commits (SeafRepo *repo);\n\nGList *\nseaf_repo_diff (SeafRepo *repo, const char *arg1, const char *arg2, int fold_dir_results, char **error);\n\ntypedef struct _SeafRepoManager SeafRepoManager;\ntypedef struct _SeafRepoManagerPriv SeafRepoManagerPriv;\n\nstruct _SeafRepoManager {\n    struct _SeafileSession *seaf;\n\n    SeafRepoManagerPriv *priv;\n};\n\nSeafRepoManager* \nseaf_repo_manager_new (struct _SeafileSession *seaf);\n\nint\nseaf_repo_manager_init (SeafRepoManager *mgr);\n\nint\nseaf_repo_manager_start (SeafRepoManager *mgr);\n\n/*\n * Repo Management functions. \n */\n\nint\nseaf_repo_manager_add_repo (SeafRepoManager *mgr, SeafRepo *repo);\n\nint\nseaf_repo_manager_del_repo (SeafRepoManager *mgr,\n                            const char *repo_id,\n                            GError **error);\n\nint\nseaf_repo_manager_del_virtual_repo (SeafRepoManager *mgr,\n                                    const char *repo_id);\n\nSeafRepo* \nseaf_repo_manager_get_repo (SeafRepoManager *manager, const gchar *id);\n\n/* Return repo object even if it's corrupted. 
*/\nSeafRepo*\nseaf_repo_manager_get_repo_ex (SeafRepoManager *manager, const gchar *id);\n\ngboolean\nseaf_repo_manager_repo_exists (SeafRepoManager *manager, const gchar *id);\n\nGList* \nseaf_repo_manager_get_repo_list (SeafRepoManager *mgr, int start, int limit,\n                                 const gchar *order_by, int ret_virt_repo);\n\ngint64\nseaf_repo_manager_count_repos (SeafRepoManager *mgr, GError **error);\n\nGList*\nseaf_repo_manager_get_trash_repo_list (SeafRepoManager *mgr,\n                                       int start,\n                                       int limit,\n                                       GError **error);\n\nGList *\nseaf_repo_manager_get_trash_repos_by_owner (SeafRepoManager *mgr,\n                                            const char *owner,\n                                            GError **error);\n\nint\nseaf_repo_manager_del_repo_from_trash (SeafRepoManager *mgr,\n                                       const char *repo_id,\n                                       GError **error);\n\n/* Remove all entries in the repo trash. 
*/\nint\nseaf_repo_manager_empty_repo_trash (SeafRepoManager *mgr, GError **error);\n\nint\nseaf_repo_manager_empty_repo_trash_by_owner (SeafRepoManager *mgr,\n                                             const char *owner,\n                                             GError **error);\n\nint\nseaf_repo_manager_restore_repo_from_trash (SeafRepoManager *mgr,\n                                           const char *repo_id,\n                                           GError **error);\n\nGList *\nseaf_repo_manager_get_repo_id_list (SeafRepoManager *mgr);\n\nint\nseaf_repo_manager_branch_repo_unmap (SeafRepoManager *manager, SeafBranch *branch);\n\n/*\n * Repo properties functions.\n */\n\n#define MAX_REPO_TOKEN 64\n#define DEFAULT_REPO_TOKEN \"default\"\n\nchar *\nseaf_repo_manager_get_email_by_token (SeafRepoManager *manager,\n                                      const char *repo_id,\n                                      const char *token);\nchar *\nseaf_repo_manager_generate_repo_token (SeafRepoManager *mgr,\n                                       const char *repo_id,\n                                       const char *email,\n                                       GError **error);\n\nint\nseaf_repo_manager_add_token_peer_info (SeafRepoManager *mgr,\n                                       const char *token,\n                                       const char *peer_id,\n                                       const char *peer_ip,\n                                       const char *peer_name,\n                                       gint64 sync_time,\n                                       const char *client_ver);\n\nint\nseaf_repo_manager_update_token_peer_info (SeafRepoManager *mgr,\n                                          const char *token,\n                                          const char *peer_ip,\n                                          gint64 sync_time,\n                                          const char 
*client_ver);\n\ngboolean\nseaf_repo_manager_token_peer_info_exists (SeafRepoManager *mgr,\n                                          const char *token);\n\nint\nseaf_repo_manager_delete_token (SeafRepoManager *mgr,\n                                const char *repo_id,\n                                const char *token,\n                                const char *user,\n                                GError **error);\n\nGList *\nseaf_repo_manager_list_repo_tokens (SeafRepoManager *mgr,\n                                    const char *repo_id,\n                                    GError **error);\nGList *\nseaf_repo_manager_list_repo_tokens_by_email (SeafRepoManager *mgr,\n                                             const char *email,\n                                             GError **error);\nint\nseaf_repo_manager_delete_repo_tokens_by_peer_id (SeafRepoManager *mgr,\n                                                 const char *email,\n                                                 const char *peer_id,\n                                                 GList **tokens,\n                                                 GError **error);\n\nint\nseaf_repo_manager_delete_repo_tokens_by_email (SeafRepoManager *mgr,\n                                               const char *email,\n                                               GError **error);\n\ngint64\nseaf_repo_manager_get_repo_size (SeafRepoManager *mgr, const char *repo_id);\n\nint\nseaf_repo_manager_set_repo_history_limit (SeafRepoManager *mgr,\n                                          const char *repo_id,\n                                          int days);\n\n/*\n * > 0: keep a period of history;\n * = 0: don't keep history;\n * < 0: keep full history.\n */\nint\nseaf_repo_manager_get_repo_history_limit (SeafRepoManager *mgr,\n                                          const char *repo_id);\n\nint\nseaf_repo_manager_set_repo_valid_since (SeafRepoManager *mgr,\n                                        
const char *repo_id,\n                                        gint64 timestamp);\n\ngint64\nseaf_repo_manager_get_repo_valid_since (SeafRepoManager *mgr,\n                                        const char *repo_id);\n\n/*\n * Return the timestamp to stop traversing history.\n * Returns > 0 if traverse a period of history;\n * Returns = 0 if only traverse the head commit;\n * Returns < 0 if traverse full history.\n */\ngint64\nseaf_repo_manager_get_repo_truncate_time (SeafRepoManager *mgr,\n                                          const char *repo_id);\n\n/*\n * Repo Operations.\n */\n\nint\nseaf_repo_manager_revert_on_server (SeafRepoManager *mgr,\n                                    const char *repo_id,\n                                    const char *commit_id,\n                                    const char *user_name,\n                                    GError **error);\n\n/**\n * Add a new file in a repo.\n * The content of the file is stored in a temporary file.\n * @repo_id:        id of the repo\n * @temp_file_path: path of the temporary file \n * @parent_dir:     the directory to add this file\n * @file_name:      the name of the new file\n * @user:           author of this operation\n */\nint\nseaf_repo_manager_post_file (SeafRepoManager *mgr,\n                             const char *repo_id,\n                             const char *temp_file_path,\n                             const char *parent_dir,\n                             const char *file_name,\n                             const char *user,\n                             GError **error);\n\nint\nseaf_repo_manager_post_multi_files (SeafRepoManager *mgr,\n                                    const char *repo_id,\n                                    const char *parent_dir,\n                                    const char *filenames_json,\n                                    const char *paths_json,\n                                    const char *user,\n                                    int 
replace_existed,\n                                    gint64 mtime,\n                                    char **new_ids,\n                                    char **task_id,\n                                    GError **error);\n\n/* int */\n/* seaf_repo_manager_post_file_blocks (SeafRepoManager *mgr, */\n/*                                     const char *repo_id, */\n/*                                     const char *parent_dir, */\n/*                                     const char *file_name, */\n/*                                     const char *blockids_json, */\n/*                                     const char *paths_json, */\n/*                                     const char *user, */\n/*                                     gint64 file_size, */\n/*                                     int replace_existed, */\n/*                                     char **new_id, */\n/*                                     GError **error); */\n\nint\nseaf_repo_manager_post_blocks (SeafRepoManager *mgr,\n                               const char *repo_id,\n                               const char *blockids_json,\n                               const char *paths_json,\n                               const char *user,\n                               GError **error);\n\nint\nseaf_repo_manager_commit_file_blocks (SeafRepoManager *mgr,\n                                      const char *repo_id,\n                                      const char *parent_dir,\n                                      const char *file_name,\n                                      const char *blockids_json,\n                                      const char *user,\n                                      gint64 file_size,\n                                      int replace_existed,\n                                      gint64 mtime,\n                                      char **new_id,\n                                      GError **error);\n\nint\nseaf_repo_manager_post_empty_file (SeafRepoManager *mgr,\n      
                             const char *repo_id,\n                                   const char *parent_dir,\n                                   const char *new_file_name,\n                                   const char *user,\n                                   GError **error);\n\nint\nseaf_repo_manager_post_dir (SeafRepoManager *mgr,\n                            const char *repo_id,\n                            const char *parent_dir,\n                            const char *new_dir_name,\n                            const char *user,\n                            GError **error);\n\nint\nseaf_repo_manager_mkdir_with_parents (SeafRepoManager *mgr,\n                                      const char *repo_id,\n                                      const char *parent_dir,\n                                      const char *new_dir_path,\n                                      const char *user,\n                                      GError **error);\n\n/**\n * Update an existing file in a repo\n * @params: same as seaf_repo_manager_post_file\n * @head_id: the commit id for the original file version.\n *           It's optional. 
If it's NULL, the current repo head will be used.\n * @new_file_id: The return location of the new file id\n */\nint\nseaf_repo_manager_put_file (SeafRepoManager *mgr,\n                            const char *repo_id,\n                            const char *temp_file_path,\n                            const char *parent_dir,\n                            const char *file_name,\n                            const char *user,\n                            const char *head_id,\n                            gint64 mtime,\n                            char **new_file_id,                            \n                            GError **error);\n\n/* int */\n/* seaf_repo_manager_put_file_blocks (SeafRepoManager *mgr, */\n/*                                    const char *repo_id, */\n/*                                    const char *parent_dir, */\n/*                                    const char *file_name, */\n/*                                    const char *blockids_json, */\n/*                                    const char *paths_json, */\n/*                                    const char *user, */\n/*                                    const char *head_id, */\n/*                                    gint64 file_size, */\n/*                                    char **new_file_id, */\n/*                                    GError **error); */\n\nint\nseaf_repo_manager_del_file (SeafRepoManager *mgr,\n                            const char *repo_id,\n                            const char *parent_dir,\n                            const char *file_name,\n                            const char *user,\n                            GError **error);\n\nint\nseaf_repo_manager_batch_del_files (SeafRepoManager *mgr,\n                                   const char *repo_id,\n                                   const char *file_list,\n                                   const char *user,\n                                   GError **error);\n\nSeafileCopyResult *\nseaf_repo_manager_copy_file 
(SeafRepoManager *mgr,\n                             const char *src_repo_id,\n                             const char *src_dir,\n                             const char *src_filename,\n                             const char *dst_repo_id,\n                             const char *dst_dir,\n                             const char *dst_filename,\n                             const char *user,\n                             int need_progress,\n                             int synchronous,\n                             GError **error);\n\nSeafileCopyResult *\nseaf_repo_manager_copy_multiple_files (SeafRepoManager *mgr,\n                                       const char *src_repo_id,\n                                       const char *src_dir,\n                                       const char *src_filenames,\n                                       const char *dst_repo_id,\n                                       const char *dst_dir,\n                                       const char *dst_filenames,\n                                       const char *user,\n                                       int need_progress,\n                                       int synchronous,\n                                       GError **error);\n\nSeafileCopyResult *\nseaf_repo_manager_move_file (SeafRepoManager *mgr,\n                             const char *src_repo_id,\n                             const char *src_dir,\n                             const char *src_filename,\n                             const char *dst_repo_id,\n                             const char *dst_dir,\n                             const char *dst_filename,\n                             int replace,\n                             const char *user,\n                             int need_progress,\n                             int synchronous,\n                             GError **error);\n\nSeafileCopyResult *\nseaf_repo_manager_move_multiple_files (SeafRepoManager *mgr,\n                                       
const char *src_repo_id,\n                                       const char *src_dir,\n                                       const char *src_filenames,\n                                       const char *dst_repo_id,\n                                       const char *dst_dir,\n                                       const char *dst_filenames,\n                                       int replace,\n                                       const char *user,\n                                       int need_progress,\n                                       int synchronous,\n                                       GError **error);\n\nint\nseaf_repo_manager_rename_file (SeafRepoManager *mgr,\n                               const char *repo_id,\n                               const char *parent_dir,\n                               const char *oldname,\n                               const char *newname,\n                               const char *user,\n                               GError **error);\n\nint\nseaf_repo_manager_is_valid_filename (SeafRepoManager *mgr,\n                                     const char *repo_id,\n                                     const char *filename,\n                                     GError **error);\n\nchar *\nseaf_repo_manager_create_new_repo (SeafRepoManager *mgr,\n                                   const char *repo_name,\n                                   const char *repo_desc,\n                                   const char *owner_email,\n                                   const char *passwd,\n                                   int enc_version,\n                                   const char *pwd_hash_algo,\n                                   const char *pwd_hash_params,\n                                   GError **error);\n\nchar *\nseaf_repo_manager_create_enc_repo (SeafRepoManager *mgr,\n                                   const char *repo_id,\n                                   const char *repo_name,\n                                
   const char *repo_desc,\n                                   const char *owner_email,\n                                   const char *magic,\n                                   const char *random_key,\n                                   const char *salt,\n                                   int enc_version,\n                                   const char *pwd_hash,\n                                   const char *pwd_hash_algo,\n                                   const char *pwd_hash_params,\n                                   GError **error);\n\n/* Give a repo and a path in this repo, returns a list of commits, where every\n * commit contains a unique version of the file. The commits are sorted in\n * ascending order of commit time. */\nGList *\nseaf_repo_manager_list_file_revisions (SeafRepoManager *mgr,\n                                       const char *repo_id,\n                                       const char *start_commit_id,\n                                       const char *path,\n                                       int limit,\n                                       gboolean got_latest,\n                                       gboolean got_second,\n                                       GError **error);\n\nGList *\nseaf_repo_manager_calc_files_last_modified (SeafRepoManager *mgr,\n                                            const char *repo_id,\n                                            const char *parent_dir,\n                                            int limit,\n                                            GError **error);\n\nint\nseaf_repo_manager_revert_file (SeafRepoManager *mgr,\n                               const char *repo_id,\n                               const char *commit_id,\n                               const char *path,\n                               const char *user,\n                               GError **error);\n\nint\nseaf_repo_manager_revert_dir (SeafRepoManager *mgr,\n                              const char *repo_id,\n    
                          const char *old_commit_id,\n                              const char *dir_path,\n                              const char *user,\n                              GError **error);\n\n/*\n * Return deleted files/dirs.\n */\nGList *\nseaf_repo_manager_get_deleted_entries (SeafRepoManager *mgr,\n                                       const char *repo_id,\n                                       int show_days,\n                                       const char *path,\n                                       const char *scan_stat,\n                                       int limit,\n                                       GError **error);\n\n/*\n * Set the dir_id of @dir_path to @new_dir_id.\n * @new_commit_id: The new head commit id after the update.\n */\nint\nseaf_repo_manager_update_dir (SeafRepoManager *mgr,\n                              const char *repo_id,\n                              const char *dir_path,\n                              const char *new_dir_id,\n                              const char *user,\n                              const char *head_id,\n                              char *new_commit_id,\n                              GError **error);\n\n/*\n * Permission related functions.\n */\n\n/* Owner functions. */\n\nint\nseaf_repo_manager_set_repo_owner (SeafRepoManager *mgr,\n                                  const char *repo_id,\n                                  const char *email);\n\nchar *\nseaf_repo_manager_get_repo_owner (SeafRepoManager *mgr,\n                                  const char *repo_id);\n\nGList *\nseaf_repo_manager_get_orphan_repo_list (SeafRepoManager *mgr);\n\n/* TODO: add start and limit. 
*/\n/* Get repos owned by this user.\n */\nGList *\nseaf_repo_manager_get_repos_by_owner (SeafRepoManager *mgr,\n                                      const char *email,\n                                      int ret_corrupted,\n                                      int start,\n                                      int limit,\n                                      gboolean *db_err);\n\nGList *\nseaf_repo_manager_get_repos_by_id_prefix (SeafRepoManager *mgr,\n                                          const char *id_prefix,\n                                          int start,\n                                          int limit);\n\nGList *\nseaf_repo_manager_search_repos_by_name (SeafRepoManager *mgr, const char *name);\n\nGList *\nseaf_repo_manager_get_repo_ids_by_owner (SeafRepoManager *mgr,\n                                         const char *email);\n\n/* Group repos. */\n\nint\nseaf_repo_manager_add_group_repo (SeafRepoManager *mgr,\n                                  const char *repo_id,\n                                  int group_id,\n                                  const char *owner,\n                                  const char *permission,\n                                  GError **error);\nint\nseaf_repo_manager_del_group_repo (SeafRepoManager *mgr,\n                                  const char *repo_id,\n                                  int group_id,\n                                  GError **error);\n\nGList *\nseaf_repo_manager_get_groups_by_repo (SeafRepoManager *mgr,\n                                      const char *repo_id,\n                                      GError **error);\n\ntypedef struct GroupPerm {\n    int group_id;\n    char permission[16];\n} GroupPerm;\n\nGList *\nseaf_repo_manager_get_group_perm_by_repo (SeafRepoManager *mgr,\n                                          const char *repo_id,\n                                          GError **error);\n\nint\nseaf_repo_manager_set_group_repo_perm (SeafRepoManager *mgr,\n           
                            const char *repo_id,\n                                       int group_id,\n                                       const char *permission,\n                                       GError **error);\n\nchar *\nseaf_repo_manager_get_group_repo_owner (SeafRepoManager *mgr,\n                                        const char *repo_id,\n                                        GError **error);\n\nGList *\nseaf_repo_manager_get_group_repoids (SeafRepoManager *mgr,\n                                     int group_id,\n                                     GError **error);\n\nGList *\nseaf_repo_manager_get_repos_by_group (SeafRepoManager *mgr,\n                                      int group_id,\n                                      GError **error);\n\nGList *\nseaf_repo_manager_get_group_repos_by_owner (SeafRepoManager *mgr,\n                                            const char *owner,\n                                            GError **error);\n\nint\nseaf_repo_manager_remove_group_repos (SeafRepoManager *mgr,\n                                      int group_id,\n                                      const char *owner,\n                                      GError **error);\n\n/* Inner public repos */\n\nint\nseaf_repo_manager_set_inner_pub_repo (SeafRepoManager *mgr,\n                                      const char *repo_id,\n                                      const char *permission);\n\nint\nseaf_repo_manager_unset_inner_pub_repo (SeafRepoManager *mgr,\n                                        const char *repo_id);\n\ngboolean\nseaf_repo_manager_is_inner_pub_repo (SeafRepoManager *mgr,\n                                     const char *repo_id);\n\nGList *\nseaf_repo_manager_list_inner_pub_repos (SeafRepoManager *mgr, gboolean *db_err);\n\ngint64\nseaf_repo_manager_count_inner_pub_repos (SeafRepoManager *mgr);\n\nGList *\nseaf_repo_manager_list_inner_pub_repos_by_owner (SeafRepoManager *mgr,\n                                                
 const char *user);\n\nchar *\nseaf_repo_manager_get_inner_pub_repo_perm (SeafRepoManager *mgr,\n                                           const char *repo_id);\n\n/*\n * Comprehensive repo permission checker.\n * It checks if @user have permission to access @repo_id.\n */\nchar *\nseaf_repo_manager_check_permission (SeafRepoManager *mgr,\n                                    const char *repo_id,\n                                    const char *user,\n                                    GError **error);\n\nGList *\nseaf_repo_manager_list_dir_with_perm (SeafRepoManager *mgr,\n                                      const char *repo_id,\n                                      const char *dir_path,\n                                      const char *dir_id,\n                                      const char *user,\n                                      int offset,\n                                      int limit,\n                                      GError **error);\n\n/* Web access permission. */\n\nint\nseaf_repo_manager_set_access_property (SeafRepoManager *mgr, const char *repo_id,\n                                       const char *ap);\n\nchar *\nseaf_repo_manager_query_access_property (SeafRepoManager *mgr, const char *repo_id);\n\n/* Decrypted repo token cache. */\n\nvoid\nseaf_repo_manager_add_decrypted_token (SeafRepoManager *mgr,\n                                       const char *encrypted_token,\n                                       const char *session_key,\n                                       const char *decrypted_token);\n\nchar *\nseaf_repo_manager_get_decrypted_token (SeafRepoManager *mgr,\n                                       const char *encrypted_token,\n                                       const char *session_key);\n\n/* Virtual repo related. 
*/\n\nchar *\nseaf_repo_manager_create_virtual_repo (SeafRepoManager *mgr,\n                                       const char *origin_repo_id,\n                                       const char *path,\n                                       const char *repo_name,\n                                       const char *repo_desc,\n                                       const char *owner,\n                                       const char *passwd,\n                                       GError **error);\n\nSeafVirtRepo *\nseaf_repo_manager_get_virtual_repo_info (SeafRepoManager *mgr,\n                                         const char *repo_id);\n\nGList *\nseaf_repo_manager_get_virtual_info_by_origin (SeafRepoManager *mgr,\n                                              const char *origin_repo);\n\nvoid\nseaf_virtual_repo_info_free (SeafVirtRepo *vinfo);\n\ngboolean\nseaf_repo_manager_is_virtual_repo (SeafRepoManager *mgr, const char *repo_id);\n\nchar *\nseaf_repo_manager_get_virtual_repo_id (SeafRepoManager *mgr,\n                                       const char *origin_repo,\n                                       const char *path,\n                                       const char *owner);\n\nGList *\nseaf_repo_manager_get_virtual_repos_by_owner (SeafRepoManager *mgr,\n                                              const char *owner,\n                                              GError **error);\n\nGList *\nseaf_repo_manager_get_virtual_repo_ids_by_origin (SeafRepoManager *mgr,\n                                                  const char *origin_repo);\n\n/*\n * if @repo_id is a virtual repo, try to merge with origin;\n * if not, try to merge with its virtual repos.\n */\nint\nseaf_repo_manager_merge_virtual_repo (SeafRepoManager *mgr,\n                                      const char *repo_id,\n                                      const char *exclude_repo);\n\n/*\n * Check each virtual repo of @origin_repo_id, if the path corresponds to it\n * doesn't exist, 
delete the virtual repo.\n */\nvoid\nseaf_repo_manager_cleanup_virtual_repos (SeafRepoManager *mgr,\n                                         const char *origin_repo_id);\n\nint\nseaf_repo_manager_init_merge_scheduler ();\n\nGList *\nseaf_repo_manager_get_shared_users_for_subdir (SeafRepoManager *mgr,\n                                               const char *repo_id,\n                                               const char *path,\n                                               const char *from_user,\n                                               GError **error);\n\nGList *\nseaf_repo_manager_get_shared_groups_for_subdir (SeafRepoManager *mgr,\n                                                const char *repo_id,\n                                                const char *path,\n                                                const char *from_user,\n                                                GError **error);\nint\nseaf_repo_manager_edit_repo (const char *repo_id,\n                const char *name,\n                const char *description,\n                const char *user,\n                GError **error);\n\ngint64\nseaf_get_total_file_number (GError **error);\n\ngint64\nseaf_get_total_storage (GError **error);\n\n/* Online GC related */\n\nchar *\nseaf_repo_get_current_gc_id (SeafRepo *repo);\n\nchar *\nseaf_repo_get_last_gc_id (SeafRepo *repo, const char *client_id);\n\ngboolean\nseaf_repo_has_last_gc_id (SeafRepo *repo, const char *client_id);\n\nint\nseaf_repo_set_last_gc_id (SeafRepo *repo,\n                          const char *client_id,\n                          const char *gc_id);\n\nint\nseaf_repo_remove_last_gc_id (SeafRepo *repo,\n                             const char *client_id);\n\nint\nseaf_repo_manager_add_upload_tmp_file (SeafRepoManager *mgr,\n                                       const char *repo_id,\n                                       const char *file_path,\n                                       const char *tmp_file,\n         
                              GError **error);\n\nint\nseaf_repo_manager_del_upload_tmp_file (SeafRepoManager *mgr,\n                                       const char *repo_id,\n                                       const char *file_path,\n                                       GError **error);\n\nchar *\nseaf_repo_manager_get_upload_tmp_file (SeafRepoManager *mgr,\n                                       const char *repo_id,\n                                       const char *file_path,\n                                       GError **error);\n\ngint64\nseaf_repo_manager_get_upload_tmp_file_offset (SeafRepoManager *mgr,\n                                              const char *repo_id,\n                                              const char *file_path,\n                                              GError **error);\n\nvoid\nseaf_repo_manager_update_repo_info (SeafRepoManager *mgr,\n                                    const char *repo_id,\n                                    const char *head_commit_id);\n\nint\nset_repo_commit_to_db (const char *repo_id, const char *repo_name, gint64 update_time,\n                       int version, gboolean is_encrypted, const char *last_modifier);\nchar *\nseaf_get_trash_repo_owner (const char *repo_id);\n\nGObject *\nseaf_get_group_shared_repo_by_path (SeafRepoManager *mgr,\n                                    const char *repo_id,\n                                    const char *path,\n                                    int group_id,\n                                    gboolean is_org,\n                                    GError **error);\n\nGList *\nseaf_get_group_repos_by_user (SeafRepoManager *mgr,\n                              const char *user,\n                              int org_id,\n                              GError **error);\n\nint\nseaf_repo_manager_set_subdir_group_perm_by_path (SeafRepoManager *mgr,\n                                                 const char *repo_id,\n                                      
           const char *username,\n                                                 int group_id,\n                                                 const char *permission,\n                                                 const char *path);\n\nint\npost_files_and_gen_commit (GList *filenames,\n                          const char *repo_id,\n                          const char *user,\n                          char **ret_json,\n                          int replace_existed,\n                          const char *canon_path,\n                          GList *id_list,\n                          GList *size_list,\n                          gint64 mtime,\n                          char *last_gc_id,\n                          GError **error);\n\nchar *\nseaf_repo_manager_convert_repo_path (SeafRepoManager *mgr,\n                                     const char *repo_id,\n                                     const char *path,\n                                     const char *user,\n                                     gboolean is_org,\n                                     GError **error);\nint\nseaf_repo_manager_set_repo_status(SeafRepoManager *mgr,\n                                  const char *repo_id, RepoStatus status);\n\nint\nseaf_repo_manager_get_repo_status(SeafRepoManager *mgr,\n                                  const char *repo_id);\n\nint\nseaf_repo_manager_repair_virtual_repo (char *repo_id);\n#endif\n"
  },
  {
    "path": "server/repo-op.c",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#include \"common.h\"\n\n#include <glib/gstdio.h>\n\n#include <jansson.h>\n#include <openssl/sha.h>\n\n#include \"utils.h\"\n#define DEBUG_FLAG SEAFILE_DEBUG_OTHER\n#include \"log.h\"\n#include \"seafile-object.h\"\n\n#include \"seafile-session.h\"\n#include \"commit-mgr.h\"\n#include \"branch-mgr.h\"\n#include \"repo-mgr.h\"\n#include \"fs-mgr.h\"\n#include \"seafile-error.h\"\n#include \"seafile-crypt.h\"\n#include \"diff-simple.h\"\n#include \"merge-new.h\"\n#include \"change-set.h\"\n#include \"seaf-utils.h\"\n\n#include \"seaf-db.h\"\n\n#define INDEX_DIR \"index\"\n\n#define PREFIX_DEL_FILE \"Deleted \\\"\"\n#define PREFIX_DEL_DIR \"Removed directory \\\"\"\n#define PREFIX_DEL_DIRS \"Removed \\\"\"\n\n#define DUPLICATE_NAMES_COUNT 1000\n\ngboolean\nshould_ignore_file(const char *filename, void *data);\n\nstatic gboolean\nis_virtual_repo_and_origin (SeafRepo *repo1, SeafRepo *repo2);\n\nstatic gboolean\ncheck_file_count_and_size (SeafRepo *repo, SeafDirent *dent, gint64 total_files,\n                           gint64 *total_size_all, char **err_str);\n\nint\npost_files_and_gen_commit (GList *filenames,\n                          const char *repo_id,\n                          const char *user,\n                          char **ret_json,\n                          int replace_existed,\n                          const char *canon_path,\n                          GList *id_list,\n                          GList *size_list,\n                          gint64 mtime,\n                          char *last_gc_id,\n                          GError **error);\n\n/*\n * Repo operations.\n */\n\nstatic gint\ncompare_dirents (gconstpointer a, gconstpointer b)\n{\n    const SeafDirent *ent_a = a, *ent_b = b;\n\n    return strcmp (ent_b->name, ent_a->name);\n}\n\nstatic inline GList *\ndup_seafdir_entries (const GList *entries)\n{\n    const GList *p;\n    GList *newentries = 
NULL;\n    SeafDirent *dent;\n    \n    for (p = entries; p; p = p->next) {\n        dent = p->data;\n        newentries = g_list_prepend (newentries, seaf_dirent_dup(dent));\n    }\n\n    return g_list_reverse(newentries);\n}\n\nstatic gboolean\nfilename_exists (GList *entries, const char *filename)\n{\n    GList *ptr;\n    SeafDirent *dent;\n\n    for (ptr = entries; ptr != NULL; ptr = ptr->next) {\n        dent = ptr->data;\n        if (strcmp (dent->name, filename) == 0)\n            return TRUE;\n    }\n\n    return FALSE;\n}\n\nstatic char *\ngenerate_unique_filename (const char *file, GList *entries)\n{\n    int i = 1;\n    char *name, *ext, *unique_name;\n\n    unique_name = g_strdup(file);\n    split_filename (unique_name, &name, &ext);\n    while (filename_exists (entries, unique_name) && i <= DUPLICATE_NAMES_COUNT) {\n        g_free (unique_name);\n        if (ext)\n            unique_name = g_strdup_printf (\"%s (%d).%s\", name, i, ext);\n        else\n            unique_name = g_strdup_printf (\"%s (%d)\", name, i);\n        i++;\n    }\n\n    g_free (name);\n    g_free (ext);\n\n    if (i <= DUPLICATE_NAMES_COUNT)\n        return unique_name;\n    else {\n        g_free (unique_name);\n        return NULL;\n    }\n}\n\n/* We need to call this function recursively because every dirs in canon_path\n * need to be updated.\n */\nstatic char *\npost_file_recursive (SeafRepo *repo,\n                     const char *dir_id,\n                     const char *to_path,\n                     int replace_existed,\n                     SeafDirent *newdent)\n{\n    SeafDir *olddir, *newdir;\n    SeafDirent *dent;\n    GList *ptr;\n    char *slash;\n    char *to_path_dup = NULL;\n    char *remain = NULL;\n    char *id = NULL;\n    char *ret = NULL;\n\n    olddir = seaf_fs_manager_get_seafdir_sorted(seaf->fs_mgr,\n                                                repo->store_id, repo->version,\n                                                dir_id);\n    if 
(!olddir)\n        return NULL;\n\n    /* we reach the target dir.  new dir entry is added */\n    if (*to_path == '\\0') {\n        GList *newentries = NULL;\n        char *unique_name;\n        SeafDirent *dent_dup;\n        if (replace_existed && filename_exists(olddir->entries, newdent->name)) {\n            GList *p;\n            SeafDirent *dent;\n\n            for (p = olddir->entries; p; p = p->next) {\n                dent = p->data;\n                if (strcmp(dent->name, newdent->name) == 0) {\n                    newentries = g_list_prepend (newentries, seaf_dirent_dup(newdent));\n                } else {\n                    newentries = g_list_prepend (newentries, seaf_dirent_dup(dent));\n                }\n            }\n            newentries = g_list_reverse (newentries);\n            newdir = seaf_dir_new (NULL, newentries,\n                                   dir_version_from_repo_version(repo->version));\n            if (seaf_dir_save (seaf->fs_mgr, repo->store_id, repo->version, newdir) == 0) {\n                ret = g_strdup (newdir->dir_id);\n            }\n            seaf_dir_free (newdir);\n            goto out;\n        }\n\n        unique_name = generate_unique_filename (newdent->name, olddir->entries);\n        if (!unique_name)\n            goto out;\n        dent_dup = seaf_dirent_new (newdent->version,\n                                    newdent->id, newdent->mode, unique_name,\n                                    newdent->mtime, newdent->modifier, newdent->size);\n        g_free (unique_name);\n\n        newentries = dup_seafdir_entries (olddir->entries);\n\n        newentries = g_list_insert_sorted (newentries,\n                                           dent_dup,\n                                           compare_dirents);\n\n        newdir = seaf_dir_new (NULL, newentries,\n                               dir_version_from_repo_version(repo->version));\n        if (seaf_dir_save (seaf->fs_mgr, repo->store_id, repo->version, 
newdir) == 0)\n            ret = g_strdup (newdir->dir_id);\n        seaf_dir_free (newdir);\n\n        goto out;\n    }\n\n    to_path_dup = g_strdup (to_path);\n    slash = strchr (to_path_dup, '/');\n\n    if (!slash) {\n        remain = to_path_dup + strlen(to_path_dup);\n    } else {\n        *slash = '\\0';\n        remain = slash + 1;\n    }\n\n    for (ptr = olddir->entries; ptr; ptr = ptr->next) {\n        dent = (SeafDirent *)ptr->data;\n\n        if (strcmp(dent->name, to_path_dup) != 0)\n            continue;\n\n        id = post_file_recursive (repo, dent->id, remain, replace_existed, newdent);\n        if (id != NULL) {\n            memcpy(dent->id, id, 40);\n            dent->id[40] = '\\0';\n            if (repo->version > 0)\n                dent->mtime = (guint64)time(NULL);\n        }\n        break;\n    }\n    \n    if (id != NULL) {\n        /* Create a new SeafDir. */\n        GList *new_entries;\n        \n        new_entries = dup_seafdir_entries (olddir->entries);\n        newdir = seaf_dir_new (NULL, new_entries,\n                               dir_version_from_repo_version(repo->version));\n        if (seaf_dir_save (seaf->fs_mgr, repo->store_id, repo->version, newdir) == 0)\n            ret = g_strndup (newdir->dir_id, 40);\n        seaf_dir_free (newdir);\n    }\n\nout:\n    g_free (to_path_dup);\n    g_free (id);\n    seaf_dir_free(olddir);\n    return ret;\n}\n\nstatic char *\ndo_post_file_replace (SeafRepo *repo,\n                      const char *root_id,\n                      const char *parent_dir,\n                      int replace_existed,\n                      SeafDirent *dent)\n{\n    /* if parent_dir is a absolutely path, we will remove the first '/' */\n    if (*parent_dir == '/')\n        parent_dir = parent_dir + 1;\n\n    return post_file_recursive(repo, root_id, parent_dir, replace_existed, dent);\n}\n\nstatic char *\ndo_post_file (SeafRepo *repo,\n              const char *root_id,\n              const char 
*parent_dir,\n              SeafDirent *dent)\n{\n    return do_post_file_replace(repo, root_id, parent_dir, 0, dent);\n}\n\nstatic char *\nget_canonical_path (const char *path)\n{\n    char *ret = g_strdup (path);\n    char *p;\n\n    for (p = ret; *p != 0; ++p) {\n        if (*p == '\\\\')\n            *p = '/';\n    }\n\n    /* Remove trailing slashes from dir path. */\n    int len = strlen(ret);\n    int i = len - 1;\n    while (i >= 0 && ret[i] == '/')\n        ret[i--] = 0;\n\n    return ret;\n}\n\n/* Return TRUE if @filename already existing in @parent_dir. If exists, and\n   @mode is not NULL, set its value to the mode of the dirent.\n*/\nstatic gboolean\ncheck_file_exists (const char *store_id,\n                   int repo_version,\n                   const char *root_id,\n                   const char *parent_dir,\n                   const char *filename,\n                   int  *mode)\n{\n    SeafDir *dir;\n    GList *p;\n    SeafDirent *dent;\n    int ret = FALSE;\n\n    dir = seaf_fs_manager_get_seafdir_by_path (seaf->fs_mgr,\n                                               store_id, repo_version,\n                                               root_id,\n                                               parent_dir, NULL);\n    if (!dir) {\n        seaf_warning (\"parent_dir %s doesn't exist in repo %s.\\n\",\n                      parent_dir, store_id);\n        return FALSE;\n    }\n\n    for (p = dir->entries; p != NULL; p = p->next) {\n        dent = p->data;\n        int r = strcmp (dent->name, filename);\n        if (r == 0) {\n            ret = TRUE;\n            if (mode) {\n                *mode = dent->mode;\n            }\n            break;\n        }\n    }\n\n    seaf_dir_free (dir);\n\n    return ret;\n}\n\n/**\n  Various online file/directory operations:\n\n  Put a file:\n  1. find parent seafdir\n  2. add a new dirent to parent seafdir\n  2. recursively update all seafdir in the path, in a bottom-up manner\n  3. 
commit it\n\n  Del a file/dir:\n  basically the same as put a file\n\n  copy a file/dir:\n  1. get src dirent from src repo\n  2. duplicate src dirent with the new file name\n  3. put the new dirent to dst repo and commit it.\n\n  Move a file/dir:\n  basically the same as a copy operation. Just one more step:\n  4. remove src dirent from src repo and commit it\n\n  Rename a file/dir:\n  1. find parent seafdir\n  2. update this seafdir with the old dirent replaced by a new dirent.\n  3. recursively update all seafdir in the path\n  \n  NOTE:\n  \n  All operations which add a new dirent would check if a dirent with the same\n  name already exists. If found, they would raise errors.\n\n  All operations which remove a dirent would check if the dirent to be removed\n  already exists. If not, they would do nothing and just return OK.\n\n*/\n\n#define GET_REPO_OR_FAIL(repo_var,repo_id)                              \\\n    do {                                                                \\\n        repo_var = seaf_repo_manager_get_repo (seaf->repo_mgr, (repo_id)); \\\n        if (!(repo_var)) {                                              \\\n            seaf_warning (\"Repo %s doesn't exist.\\n\", (repo_id));       \\\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid repo\"); \\\n            ret = -1;                                                   \\\n            goto out;                                                   \\\n        }                                                               \\\n    } while (0);\n\n#define GET_COMMIT_OR_FAIL(commit_var,repo_id,repo_version,commit_id)   \\\n    do {                                                                \\\n        commit_var = seaf_commit_manager_get_commit(seaf->commit_mgr, (repo_id), (repo_version), (commit_id)); \\\n        if (!(commit_var)) {                                            \\\n            seaf_warning (\"commit %s:%s doesn't exist.\\n\", (repo_id), 
(commit_id)); \\\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid commit\"); \\\n            ret = -1;                                                   \\\n            goto out;                                                   \\\n        }                                                               \\\n    } while (0);\n\n#define FAIL_IF_FILE_EXISTS(store_id,repo_version,root_id,parent_dir,filename,mode) \\\n    do {                                                                \\\n        if (check_file_exists ((store_id), (repo_version), (root_id), (parent_dir), (filename), (mode))) { \\\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,      \\\n                         \"file already exists\");                        \\\n            ret = -1;                                                   \\\n            goto out;                                                   \\\n        }                                                               \\\n    } while (0);\n\n#define FAIL_IF_FILE_NOT_EXISTS(store_id,repo_version,root_id,parent_dir,filename,mode)       \\\n    do {                                                                \\\n        if (!check_file_exists ((store_id), (repo_version), (root_id), (parent_dir), (filename), (mode))) { \\\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,      \\\n                         \"file does not exist\");                        \\\n            ret = -1;                                                   \\\n            goto out;                                                   \\\n        }                                                               \\\n    } while (0);\n\n#define STD_FILE_MODE (S_IFREG | 0644)\n\nstatic char *\ngen_merge_description (SeafRepo *repo,\n                       const char *merged_root,\n                       const char *p1_root,\n                       const char *p2_root)\n{\n    GList *p;\n    GList *results = 
NULL;\n    char *desc;\n    \n    diff_merge_roots (repo->store_id, repo->version,\n                      merged_root, p1_root, p2_root, &results, TRUE);\n\n    desc = diff_results_to_description (results);\n\n    for (p = results; p; p = p->next) {\n        DiffEntry *de = p->data;\n        diff_entry_free (de);\n    }\n    g_list_free (results);\n\n    return desc;\n}\n\nstatic int\ngen_new_commit (const char *repo_id,\n                SeafCommit *base,\n                const char *new_root,\n                const char *user,\n                const char *desc,\n                char *new_commit_id,\n                gboolean handle_concurrent_update,\n                gboolean check_gc,\n                const char *last_gc_id,\n                GError **error)\n{\n#define MAX_RETRY_COUNT 10\n\n    SeafRepo *repo = NULL;\n    SeafCommit *new_commit = NULL, *current_head = NULL, *merged_commit = NULL;\n    int retry_cnt = 0;\n    gboolean gc_conflict = FALSE;\n    int ret = 0;\n\n    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);\n    if (!repo) {\n        seaf_warning (\"Repo %s doesn't exist.\\n\", repo_id);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, \"Invalid repo\");\n        ret = -1;\n        goto out;\n    }\n\n    /* Create a new commit pointing to new_root. 
*/\n    new_commit = seaf_commit_new(NULL, repo->id, new_root,\n                                 user, EMPTY_SHA1,\n                                 desc, 0);\n    new_commit->parent_id = g_strdup (base->commit_id);\n    seaf_repo_to_commit (repo, new_commit);\n\n    if (seaf_commit_manager_add_commit (seaf->commit_mgr, new_commit) < 0) {\n        seaf_warning (\"Failed to add commit.\\n\");\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to add commit\");\n        ret = -1;\n        goto out;\n    }\n\nretry:\n    current_head = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                                                   repo->id, repo->version, \n                                                   repo->head->commit_id);\n    if (!current_head) {\n        seaf_warning (\"Failed to find head commit %s of %s.\\n\",\n                      repo->head->commit_id, repo_id);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, \"Invalid repo\");\n        ret = -1;\n        goto out;\n    }\n\n    /* Merge if base and head are not the same. 
*/\n    if (strcmp (base->commit_id, current_head->commit_id) != 0) {\n        if (!handle_concurrent_update) {\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_CONCURRENT_UPLOAD, \"Concurrent upload\");\n            ret = -1;\n            goto out;\n        }\n        MergeOptions opt;\n        const char *roots[3];\n        char *desc = NULL;\n\n        memset (&opt, 0, sizeof(opt));\n        opt.n_ways = 3;\n        memcpy (opt.remote_repo_id, repo_id, 36);\n        memcpy (opt.remote_head, new_commit->commit_id, 40);\n        opt.do_merge = TRUE;\n\n        roots[0] = base->root_id; /* base */\n        roots[1] = current_head->root_id; /* head */\n        roots[2] = new_root;      /* remote */\n\n        if (seaf_merge_trees (repo->store_id, repo->version, 3, roots, &opt) < 0) {\n            seaf_warning (\"Failed to merge.\\n\");\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                         \"Internal error\");\n            ret = -1;\n            goto out;\n        }\n\n        seaf_debug (\"Number of dirs visted in merge %.8s: %d.\\n\",\n                    repo_id, opt.visit_dirs);\n\n        if (!opt.conflict)\n            desc = g_strdup(\"Auto merge by system\");\n        else {\n            desc = gen_merge_description (repo,\n                                          opt.merged_tree_root,\n                                          current_head->root_id,\n                                          new_root);\n            if (!desc)\n                desc = g_strdup(\"Auto merge by system\");\n        }\n\n        merged_commit = seaf_commit_new(NULL, repo->id, opt.merged_tree_root,\n                                        user, EMPTY_SHA1,\n                                        desc,\n                                        0);\n        g_free (desc);\n\n        merged_commit->parent_id = g_strdup (current_head->commit_id);\n        merged_commit->second_parent_id = g_strdup (new_commit->commit_id);\n        
merged_commit->new_merge = TRUE;\n        if (opt.conflict)\n            merged_commit->conflict = TRUE;\n        seaf_repo_to_commit (repo, merged_commit);\n\n        if (seaf_commit_manager_add_commit (seaf->commit_mgr, merged_commit) < 0) {\n            seaf_warning (\"Failed to add commit.\\n\");\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                         \"Failed to add commit\");\n            ret = -1;\n            goto out;\n        }\n    } else {\n        seaf_commit_ref (new_commit);\n        merged_commit = new_commit;\n    }\n\n    seaf_branch_set_commit(repo->head, merged_commit->commit_id);\n\n    if (seaf_db_type(seaf->db) == SEAF_DB_TYPE_SQLITE)\n        check_gc = FALSE;\n\n    if (check_gc)\n        gc_conflict = FALSE;\n\n    if (seaf_branch_manager_test_and_update_branch(seaf->branch_mgr,\n                                                   repo->head,\n                                                   current_head->commit_id,\n                                                   check_gc,\n                                                   last_gc_id,\n                                                   repo->store_id,\n                                                   &gc_conflict) < 0)\n    {\n        if (check_gc && gc_conflict) {\n            seaf_warning (\"Head branch update for repo %s conflicts with GC.\\n\",\n                          repo->id);\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GC_CONFLICT, \"GC Conflict\");\n            ret = -1;\n            goto out;\n        }\n        if (!handle_concurrent_update) {\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_CONCURRENT_UPLOAD, \"Concurrent upload\");\n            ret = -1;\n            goto out;\n        }\n        seaf_repo_unref (repo);\n        repo = NULL;\n        seaf_commit_unref (current_head);\n        current_head = NULL;\n        seaf_commit_unref (merged_commit);\n        merged_commit = NULL;\n\n        if 
(++retry_cnt <= MAX_RETRY_COUNT) {\n            /* Sleep random time between 0 and 3 seconds. */\n            usleep (g_random_int_range(0, 30) * 100 * 1000);\n\n            repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);\n            if (!repo) {\n                seaf_warning (\"Repo %s doesn't exist.\\n\", repo_id);\n                g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, \"Invalid repo\");\n                ret = -1;\n                goto out;\n            }\n\n            goto retry;\n        } else {\n            seaf_warning (\"Stop updating repo %s after %d retries.\\n\", repo_id, MAX_RETRY_COUNT);\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, \"Concurrent update\");\n            ret = -1;\n            goto out;\n        }\n    }\n\n    if (new_commit_id)\n        memcpy (new_commit_id, merged_commit->commit_id, 41);\n\nout:\n    seaf_commit_unref (new_commit);\n    seaf_commit_unref (current_head);\n    seaf_commit_unref (merged_commit);\n    seaf_repo_unref (repo);\n    return ret;\n}\n\nstatic void\nupdate_repo_size(const char *repo_id)\n{\n    schedule_repo_size_computation (seaf->size_sched, repo_id);\n}\n\nint\nseaf_repo_manager_post_file (SeafRepoManager *mgr,\n                             const char *repo_id,\n                             const char *temp_file_path,\n                             const char *parent_dir,\n                             const char *file_name,\n                             const char *user,\n                             GError **error)\n{\n    SeafRepo *repo = NULL;\n    SeafCommit *head_commit = NULL;\n    char *canon_path = NULL;\n    unsigned char sha1[20];\n    char buf[SEAF_PATH_MAX];\n    char *root_id = NULL;\n    SeafileCrypt *crypt = NULL;\n    SeafDirent *new_dent = NULL;\n    char hex[41];\n    char *gc_id = NULL;\n    int ret = 0;\n    int retry_cnt = 0;\n\n    if (g_access (temp_file_path, R_OK) != 0) {\n        seaf_warning (\"[post file] File %s doesn't exist or 
not readable.\\n\",\n                      temp_file_path);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid input file\");\n        return -1;\n    }\n\n    GET_REPO_OR_FAIL(repo, repo_id);\n    GET_COMMIT_OR_FAIL(head_commit, repo->id, repo->version, repo->head->commit_id);\n\n    if (!canon_path)\n        canon_path = get_canonical_path (parent_dir);\n\n    if (should_ignore_file (file_name, NULL)) {\n        seaf_debug (\"[post file] Invalid filename %s.\\n\", file_name);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid filename\");\n        ret = -1;\n        goto out;\n    }\n\n    if (strstr (parent_dir, \"//\") != NULL) {\n        seaf_debug (\"[post file] parent_dir cantains // sequence.\\n\");\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid parent dir\");\n        ret = -1;\n        goto out;\n    }\n    \n    /* Write blocks. */\n    if (repo->encrypted) {\n        unsigned char key[32], iv[16];\n        if (seaf_passwd_manager_get_decrypt_key_raw (seaf->passwd_mgr,\n                                                     repo_id, user,\n                                                     key, iv) < 0) {\n            seaf_debug (\"Passwd for repo %s is not set.\\n\", repo_id);\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                         \"Passwd is not set\");\n            ret = -1;\n            goto out;\n        }\n        crypt = seafile_crypt_new (repo->enc_version, key, iv);\n    }\n\n    gc_id = seaf_repo_get_current_gc_id (repo);\n\n    gint64 size;\n    if (seaf_fs_manager_index_blocks (seaf->fs_mgr,\n                                      repo->store_id, repo->version,\n                                      temp_file_path,\n                                      sha1, &size, crypt, TRUE, FALSE, NULL) < 0) {\n        seaf_warning (\"failed to index blocks\");\n        g_set_error 
(error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to index blocks\");\n        ret = -1;\n        goto out;\n    }\n\n    rawdata_to_hex(sha1, hex, 20);\n    new_dent = seaf_dirent_new (dir_version_from_repo_version (repo->version),\n                                hex, STD_FILE_MODE, file_name,\n                                (gint64)time(NULL), user, size);\n\nretry:\n    root_id = do_post_file (repo,\n                            head_commit->root_id, canon_path, new_dent);\n    if (!root_id) {\n        seaf_warning (\"[post file] Failed to post file %s to %s in repo %s.\\n\",\n                      file_name, canon_path, repo->id);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to post file\");\n        ret = -1;\n        goto out;\n    }\n\n    snprintf(buf, SEAF_PATH_MAX, \"Added \\\"%s\\\"\", file_name);\n    if (gen_new_commit (repo_id, head_commit, root_id,\n                        user, buf, NULL, FALSE, TRUE, gc_id, error) < 0) {\n        if (*error == NULL || (*error)->code != SEAF_ERR_CONCURRENT_UPLOAD) {\n            ret = -1;\n            goto out;\n        }\n\n        retry_cnt++;\n        seaf_debug (\"[post file] Concurrent upload retry :%d\\n\", retry_cnt);\n        /* Sleep random time between 0 and 3 seconds. 
*/\n        usleep (g_random_int_range(0, 30) * 100 * 1000);\n\n        g_free (root_id);\n        g_clear_error (error);\n\n        seaf_repo_unref (repo);\n        seaf_commit_unref(head_commit);\n\n        GET_REPO_OR_FAIL(repo, repo_id);\n        GET_COMMIT_OR_FAIL(head_commit, repo->id, repo->version, repo->head->commit_id);\n\n        goto retry;\n    }\n\n    seaf_repo_manager_merge_virtual_repo (mgr, repo_id, NULL);\n\nout:\n    if (repo)\n        seaf_repo_unref (repo);\n    if (head_commit)\n        seaf_commit_unref(head_commit);\n    seaf_dirent_free (new_dent);\n    g_free (root_id);\n    g_free (canon_path);\n    g_free (crypt);\n    g_free (gc_id);\n\n    if (ret == 0)\n        update_repo_size(repo_id);\n\n    return ret;\n}\n\nstatic int\nadd_new_entries (SeafRepo *repo, const char *user, GList **entries,\n                 GList *dents, int replace_existed, GList **name_list)\n{\n    GList *ptr;\n    SeafDirent *dent;\n\n    for (ptr = dents; ptr; ptr = ptr->next) {\n        dent = ptr->data;\n\n        char *unique_name;\n        SeafDirent *newdent;\n        gboolean replace = FALSE;\n\n        if (replace_existed) {\n            GList *p;\n            SeafDirent *tmp_dent;\n            for (p = *entries; p; p = p->next) {\n                tmp_dent = p->data;\n                if (strcmp(tmp_dent->name, dent->name) == 0) {\n                    replace = TRUE;\n                    *entries = g_list_delete_link (*entries, p);\n                    seaf_dirent_free (tmp_dent);\n                    break;\n                }\n            }\n        }\n\n        if (replace)\n            unique_name = g_strdup (dent->name);\n        else\n            unique_name = generate_unique_filename (dent->name, *entries);\n\n        if (unique_name != NULL) {\n            newdent = seaf_dirent_new (dir_version_from_repo_version(repo->version),\n                                       dent->id, dent->mode, unique_name,\n                                       
dent->mtime, user, dent->size);\n            *entries = g_list_insert_sorted (*entries, newdent, compare_dirents);\n            *name_list = g_list_append (*name_list, unique_name);\n            /* No need to free unique_name */\n        } else {\n            return -1;\n        }\n    }\n\n    return 0;\n}\n\nstatic char *\npost_multi_files_recursive (SeafRepo *repo,\n                            const char *dir_id,\n                            const char *to_path,\n                            GList *dents,\n                            const char *user,\n                            int replace_existed,\n                            GList **name_list)\n{\n    SeafDir *olddir, *newdir;\n    SeafDirent *dent;\n    GList *ptr;\n    char *slash;\n    char *to_path_dup = NULL;\n    char *remain = NULL;\n    char *id = NULL;\n    char *ret = NULL;\n\n    olddir = seaf_fs_manager_get_seafdir_sorted(seaf->fs_mgr,\n                                                repo->store_id,\n                                                repo->version,\n                                                dir_id);\n    if (!olddir)\n        return NULL;\n\n    /* we reach the target dir.  
new dir entry is added */\n    if (*to_path == '\\0') {\n        GList *newentries;\n\n        newentries = dup_seafdir_entries (olddir->entries);\n\n        if (add_new_entries (repo, user,\n                             &newentries, dents, replace_existed, name_list) < 0)\n            goto out;\n\n        newdir = seaf_dir_new (NULL, newentries,\n                               dir_version_from_repo_version(repo->version));\n        if (seaf_dir_save (seaf->fs_mgr, repo->store_id, repo->version, newdir) == 0)\n            ret = g_strdup (newdir->dir_id);\n        seaf_dir_free (newdir);\n\n        goto out;\n    }\n\n    to_path_dup = g_strdup (to_path);\n    slash = strchr (to_path_dup, '/');\n\n    if (!slash) {\n        remain = to_path_dup + strlen(to_path_dup);\n    } else {\n        *slash = '\\0';\n        remain = slash + 1;\n    }\n\n    for (ptr = olddir->entries; ptr; ptr = ptr->next) {\n        dent = (SeafDirent *)ptr->data;\n\n        if (strcmp(dent->name, to_path_dup) != 0)\n            continue;\n\n        id = post_multi_files_recursive (repo, dent->id, remain, dents, user,\n                                         replace_existed, name_list);\n        if (id != NULL) {\n            memcpy(dent->id, id, 40);\n            dent->id[40] = '\\0';\n            if (repo->version > 0)\n                dent->mtime = (guint64)time(NULL);\n        }\n        break;\n    }\n    \n    if (id != NULL) {\n        /* Create a new SeafDir. 
*/\n        GList *new_entries;\n        \n        new_entries = dup_seafdir_entries (olddir->entries);\n        newdir = seaf_dir_new (NULL, new_entries,\n                               dir_version_from_repo_version(repo->version));\n        if (seaf_dir_save (seaf->fs_mgr, repo->store_id, repo->version, newdir) == 0)\n            ret = g_strdup (newdir->dir_id);\n        seaf_dir_free (newdir);\n    }\n\nout:\n    g_free (to_path_dup);\n    g_free (id);\n    seaf_dir_free(olddir);\n    return ret;\n}\n\nstatic char *\ndo_post_multi_files (SeafRepo *repo,\n                     const char *root_id,\n                     const char *parent_dir,\n                     GList *filenames,\n                     GList *id_list,\n                     GList *size_list,\n                     const char *user,\n                     int replace_existed,\n                     gint64 mtime,\n                     GList **name_list)\n{\n    SeafDirent *dent;\n    GList *dents = NULL;\n    GList *ptr1, *ptr2, *ptr3;\n    char *ret;\n\n    for (ptr1 = filenames, ptr2 = id_list, ptr3 = size_list;\n         ptr1 && ptr2 && ptr3;\n         ptr1 = ptr1->next, ptr2 = ptr2->next, ptr3 = ptr3->next) {\n\n        char *name = ptr1->data;\n        char *id = ptr2->data;\n        gint64 *size = ptr3->data;\n\n        dent = g_new0 (SeafDirent, 1);\n        dent->name = name;\n        memcpy(dent->id, id, 40);\n        dent->id[40] = '\\0';\n        dent->size = *size;\n        dent->mode = STD_FILE_MODE;\n        if (mtime > 0) {\n            dent->mtime = mtime;\n        } else {\n            dent->mtime = (gint64)time(NULL);\n        }\n\n        dents = g_list_append (dents, dent);\n    }\n    /* if parent_dir is a absolutely path, we will remove the first '/' */\n    if (*parent_dir == '/')\n        parent_dir = parent_dir + 1;\n\n    ret = post_multi_files_recursive(repo, root_id, parent_dir,\n                                     dents, user, replace_existed, name_list);\n    
g_list_free_full (dents, g_free);\n\n    return ret;\n}\n\nstatic GList *\njson_to_file_list (const char *files_json)\n{\n    json_t *array;\n    GList *files = NULL;\n    json_error_t jerror;\n    size_t index;\n    json_t *value;\n    const char *file;\n    char *norm_file;\n\n    array = json_loadb (files_json, strlen(files_json), 0, &jerror);\n    if (!array) {\n        seaf_warning (\"Failed to load json file list: %s.\\n\", jerror.text);\n        return NULL;\n    }\n\n    size_t n = json_array_size (array);\n    for (index = 0; index < n; index++) {\n        value = json_array_get (array, index);\n        file = json_string_value (value);\n        if (!file) {\n            g_list_free_full (files, g_free);\n            files = NULL;\n            break;\n        }\n\n        norm_file = normalize_utf8_path (file);\n        if (!norm_file) {\n            g_list_free_full (files, g_free);\n            files = NULL;\n            break;\n        }\n\n        files = g_list_prepend (files, norm_file);\n    }\n\n    json_decref (array);\n    return g_list_reverse(files);\n}\n\n/*\n * Return [{'name': 'file1', 'id': 'id1', 'size': num1}, {'name': 'file2', 'id': 'id2', 'size': num2}]\n */\nstatic char *\nformat_json_ret (GList *name_list, GList *id_list, GList *size_list)\n{\n    json_t *array, *obj;\n    GList *ptr, *ptr2;\n    GList *sptr;\n    char *filename, *id;\n    gint64 *size;\n    char *json_data;\n    char *ret;\n\n    array = json_array ();\n\n    for (ptr = name_list, ptr2 = id_list, sptr = size_list;\n         ptr && ptr2 && sptr;\n         ptr = ptr->next, ptr2 = ptr2->next, sptr = sptr->next) {\n        filename = ptr->data;\n        id = ptr2->data;\n        size = sptr->data;\n        obj = json_object ();\n        json_object_set_string_member (obj, \"name\", filename);\n        json_object_set_string_member (obj, \"id\", id);\n        json_object_set_int_member (obj, \"size\", *size);\n        json_array_append_new (array, obj);\n    }\n\n    
json_data = json_dumps (array, 0);\n    json_decref (array);\n\n    ret = g_strdup (json_data);\n    free (json_data);\n    return ret;\n}\n\nstatic gboolean\ncheck_files_with_same_name (SeafRepo *repo, const char *parent_dir, GList *filenames)\n{\n    char *canon_path = NULL;\n    SeafCommit *commit = NULL;\n    SeafDir *dir = NULL;\n    gboolean ret = FALSE;\n\n    commit = seaf_commit_manager_get_commit(seaf->commit_mgr, repo->id, repo->version, repo->head->commit_id);\n    if (!commit) {\n        seaf_warning (\"commit %s:%s doesn't exist.\\n\", repo->id, repo->head->commit_id);\n        goto out;\n    }\n\n    canon_path = get_canonical_path (parent_dir);\n\n    dir = seaf_fs_manager_get_seafdir_by_path (seaf->fs_mgr,\n                                               repo->store_id, repo->version,\n                                               commit->root_id,\n                                               canon_path, NULL);\n    if (!dir) {\n        goto out;\n    }\n\n    GList *ptr;\n    for (ptr = filenames; ptr; ptr = ptr->next) {\n        char *name = ptr->data;\n        char *unique_name = NULL;\n        unique_name = generate_unique_filename (name, dir->entries);\n        if (!unique_name) {\n            ret = TRUE;\n            goto out;\n        }\n        g_free (unique_name);\n    }\nout:\n    g_free (canon_path);\n    seaf_dir_free (dir);\n    seaf_commit_unref (commit);\n\n    return ret;\n}\n\nint\nseaf_repo_manager_post_multi_files (SeafRepoManager *mgr,\n                                    const char *repo_id,\n                                    const char *parent_dir,\n                                    const char *filenames_json,\n                                    const char *paths_json,\n                                    const char *user,\n                                    int replace_existed,\n                                    gint64 mtime,\n                                    char **ret_json,\n                                    
char **task_id,\n                                    GError **error)\n{\n    SeafRepo *repo = NULL;\n    char *canon_path = NULL;\n    GList *filenames = NULL, *paths = NULL, *id_list = NULL, *size_list = NULL, *ptr;\n    char *filename, *path;\n    char *gc_id = NULL;\n    unsigned char sha1[20];\n    SeafileCrypt *crypt = NULL;\n    char hex[41];\n    int ret = 0;\n\n    GET_REPO_OR_FAIL(repo, repo_id);\n\n    canon_path = get_canonical_path (parent_dir);\n\n    /* Decode file name and tmp file paths from json. */\n    filenames = json_to_file_list (filenames_json);\n    paths = json_to_file_list (paths_json);\n    if (!filenames || !paths) {\n        seaf_debug (\"[post files] Invalid filenames or paths.\\n\");\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid files\");\n        ret = -1;\n        goto out;\n    }\n\n    if (!replace_existed && check_files_with_same_name (repo, parent_dir, filenames))  {\n        seaf_debug (\"[post files] Too many files with same name.\\n\");\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_FILES_WITH_SAME_NAME, \"Too many files with same name\");\n        ret = -1;\n        goto out;\n    }\n\n    /* Check inputs. */\n    for (ptr = filenames; ptr; ptr = ptr->next) {\n        filename = ptr->data;\n        if (should_ignore_file (filename, NULL)) {\n            seaf_debug (\"[post files] Invalid filename %s.\\n\", filename);\n            g_set_error (error, SEAFILE_DOMAIN, POST_FILE_ERR_FILENAME,\n                         \"%s\", filename);\n            ret = -1;\n            goto out;\n        }\n    }\n\n    if (strstr (parent_dir, \"//\") != NULL) {\n        seaf_debug (\"[post file] parent_dir cantains // sequence.\\n\");\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid parent dir\");\n        ret = -1;\n        goto out;\n    }\n\n    /* Index tmp files and get file id list. 
*/\n    if (repo->encrypted) {\n        unsigned char key[32], iv[16];\n        if (seaf_passwd_manager_get_decrypt_key_raw (seaf->passwd_mgr,\n                                                     repo_id, user,\n                                                     key, iv) < 0) {\n            seaf_debug (\"Passwd for repo %s is not set.\\n\", repo_id);\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                         \"Passwd is not set\");\n            ret = -1;\n            goto out;\n        }\n        crypt = seafile_crypt_new (repo->enc_version, key, iv);\n    }\n\n    if (!task_id) {\n        gint64 *size;\n        gc_id = seaf_repo_get_current_gc_id(repo);\n        for (ptr = paths; ptr; ptr = ptr->next) {\n            path = ptr->data;\n\n            size = g_new (gint64, 1);\n            if (seaf_fs_manager_index_blocks (seaf->fs_mgr,\n                                              repo->store_id, repo->version,\n                                              path, sha1, size, crypt, TRUE, FALSE, NULL) < 0) {\n                seaf_warning (\"failed to index blocks\");\n                g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                             \"Failed to index blocks\");\n                ret = -1;\n                goto out;\n            }\n\n            rawdata_to_hex(sha1, hex, 20);\n            id_list = g_list_prepend (id_list, g_strdup(hex));\n            size_list = g_list_prepend (size_list, size);\n        }\n        id_list = g_list_reverse (id_list);\n        size_list = g_list_reverse (size_list);\n\n        ret = post_files_and_gen_commit (filenames,\n                                         repo->id,\n                                         user,\n                                         ret_json,\n                                         replace_existed,\n                                         canon_path,\n                                         id_list,\n                                   
      size_list,\n                                         mtime,\n                                         gc_id,\n                                         error);\n    } else {\n        ret = index_blocks_mgr_start_index (seaf->index_blocks_mgr,\n                                            filenames,\n                                            paths,\n                                            repo_id,\n                                            user,\n                                            replace_existed,\n                                            ret_json == NULL ? FALSE : TRUE,\n                                            canon_path,\n                                            crypt,\n                                            task_id);\n    }\n\nout:\n    if (repo)\n        seaf_repo_unref (repo);\n    string_list_free (filenames);\n    string_list_free (paths);\n    string_list_free (id_list);\n    for (ptr = size_list; ptr; ptr = ptr->next)\n        g_free (ptr->data);\n    g_list_free (size_list);\n    g_free (canon_path);\n    g_free (crypt);\n    g_free (gc_id);\n\n    return ret;\n}\n\nint\npost_files_and_gen_commit (GList *filenames,\n                           const char *repo_id,\n                           const char *user,\n                           char **ret_json,\n                           int replace_existed,\n                           const char *canon_path,\n                           GList *id_list,\n                           GList *size_list,\n                           gint64 mtime,\n                           char *last_gc_id,\n                           GError **error)\n{\n    SeafRepo *repo = NULL;\n    GList *name_list = NULL;\n    GString *buf = g_string_new (NULL);\n    SeafCommit *head_commit = NULL;\n    char *root_id = NULL;\n    int ret = 0;\n    int retry_cnt = 0;\n    gboolean handle_concurrent_update = TRUE;\n\n    if (replace_existed == 0) {\n        handle_concurrent_update = FALSE;\n    }\n\n    
GET_REPO_OR_FAIL(repo, repo_id);\n    GET_COMMIT_OR_FAIL(head_commit, repo->id, repo->version, repo->head->commit_id);\n\nretry:\n    /* Add the files to parent dir and commit. */\n    root_id = do_post_multi_files (repo, head_commit->root_id, canon_path,\n                                   filenames, id_list, size_list, user,\n                                   replace_existed, mtime, &name_list);\n    if (!root_id) {\n        seaf_warning (\"[post multi-file] Failed to post files to %s in repo %s.\\n\",\n                      canon_path, repo->id);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_INTERNAL,\n                     \"Failed to put file\");\n        ret = -1;\n        goto out;\n    }\n    guint len = g_list_length (filenames);\n    if (len > 1)\n        g_string_printf (buf, \"Added \\\"%s\\\" and %u more files.\",\n                         (char *)(filenames->data), len - 1);\n    else\n        g_string_printf (buf, \"Added \\\"%s\\\".\", (char *)(filenames->data));\n\n    if (gen_new_commit (repo->id, head_commit, root_id,\n                        user, buf->str, NULL, handle_concurrent_update, TRUE, last_gc_id, error) < 0) {\n        if (*error == NULL || (*error)->code != SEAF_ERR_CONCURRENT_UPLOAD) {\n            ret = -1;\n            goto out;\n        }\n\n        retry_cnt++;\n        seaf_debug (\"[post multi-file] Concurrent upload retry :%d\\n\", retry_cnt);\n\n        /* Sleep random time between 0 and 3 seconds. 
*/\n        usleep (g_random_int_range(0, 30) * 100 * 1000);\n\n        g_free (root_id);\n        g_clear_error (error);\n\n        seaf_repo_unref (repo);\n        seaf_commit_unref(head_commit);\n\n        GET_REPO_OR_FAIL(repo, repo_id);\n        GET_COMMIT_OR_FAIL(head_commit, repo->id, repo->version, repo->head->commit_id);\n        goto retry;\n    }\n\n    seaf_repo_manager_merge_virtual_repo (seaf->repo_mgr, repo->id, NULL);\n\n    if (ret_json)\n        *ret_json = format_json_ret (name_list, id_list, size_list);\n\n    update_repo_size(repo->id);\n\nout:\n    if (repo)\n        seaf_repo_unref (repo);\n    if (head_commit)\n        seaf_commit_unref(head_commit);\n    string_list_free (name_list);\n    g_string_free (buf, TRUE);\n    g_free (root_id);\n\n    return ret;\n}\n\n/* int */\n/* seaf_repo_manager_post_file_blocks (SeafRepoManager *mgr, */\n/*                                     const char *repo_id, */\n/*                                     const char *parent_dir, */\n/*                                     const char *file_name, */\n/*                                     const char *blockids_json, */\n/*                                     const char *paths_json, */\n/*                                     const char *user, */\n/*                                     gint64 file_size, */\n/*                                     int replace_existed, */\n/*                                     char **new_id, */\n/*                                     GError **error) */\n/* { */\n/*     SeafRepo *repo = NULL; */\n/*     SeafCommit *head_commit = NULL; */\n/*     char *canon_path = NULL; */\n/*     unsigned char sha1[20]; */\n/*     char buf[SEAF_PATH_MAX]; */\n/*     char *root_id = NULL; */\n/*     SeafDirent *new_dent = NULL; */\n/*     GList *blockids = NULL, *paths = NULL, *ptr; */\n/*     char hex[41]; */\n/*     int ret = 0; */\n\n/*     blockids = json_to_file_list (blockids_json); */\n/*     paths = json_to_file_list (paths_json); */\n/*     
if (g_list_length(blockids) != g_list_length(paths)) { */\n/*         seaf_debug (\"[post-blks] Invalid blockids or paths.\\n\"); */\n/*         g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid files\"); */\n/*         ret = -1; */\n/*         goto out; */\n/*     } */\n\n/*     for (ptr = paths; ptr; ptr = ptr->next) { */\n/*         char *temp_file_path = ptr->data; */\n/*         if (g_access (temp_file_path, R_OK) != 0) { */\n/*             seaf_warning (\"[post-blks] File block %s doesn't exist or not readable.\\n\", */\n/*                           temp_file_path); */\n/*             g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, */\n/*                          \"Invalid input file\"); */\n/*             ret = -1; */\n/*             goto out; */\n/*         } */\n/*     } */\n\n/*     GET_REPO_OR_FAIL(repo, repo_id); */\n/*     GET_COMMIT_OR_FAIL(head_commit, repo->id, repo->version, repo->head->commit_id); */\n\n/*     if (!canon_path) */\n/*         canon_path = get_canonical_path (parent_dir); */\n\n/*     if (should_ignore_file (file_name, NULL)) { */\n/*         seaf_debug (\"[post-blks] Invalid filename %s.\\n\", file_name); */\n/*         g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, */\n/*                      \"Invalid filename\"); */\n/*         ret = -1; */\n/*         goto out; */\n/*     } */\n\n/*     if (strstr (parent_dir, \"//\") != NULL) { */\n/*         seaf_debug (\"[post-blks] parent_dir cantains // sequence.\\n\"); */\n/*         g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, */\n/*                      \"Invalid parent dir\"); */\n/*         ret = -1; */\n/*         goto out; */\n/*     } */\n\n/*     /\\* Write blocks. 
*\\/ */\n/*     if (seaf_fs_manager_index_file_blocks (seaf->fs_mgr, */\n/*                                            repo->store_id, repo->version, */\n/*                                            paths, */\n/*                                            blockids, sha1, file_size) < 0) { */\n/*         seaf_warning (\"Failed to index file blocks\"); */\n/*         g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, */\n/*                      \"Failed to index blocks\"); */\n/*         ret = -1; */\n/*         goto out; */\n/*     } */\n\n/*     rawdata_to_hex(sha1, hex, 20); */\n/*     new_dent = seaf_dirent_new (dir_version_from_repo_version(repo->version), */\n/*                                 hex, STD_FILE_MODE, file_name, */\n/*                                 (gint64)time(NULL), user, file_size); */\n\n/*     root_id = do_post_file_replace (repo, head_commit->root_id, */\n/*                                     canon_path, replace_existed, new_dent); */\n/*     if (!root_id) { */\n/*         seaf_warning (\"[post-blks] Failed to post file to %s in repo %s.\\n\", */\n/*                       canon_path, repo->id); */\n/*         g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, */\n/*                      \"Failed to put file\"); */\n/*         ret = -1; */\n/*         goto out; */\n/*     } */\n\n/*     *new_id = g_strdup(hex); */\n/*     snprintf(buf, SEAF_PATH_MAX, \"Added \\\"%s\\\"\", file_name); */\n/*     if (gen_new_commit (repo_id, head_commit, root_id, */\n/*                         user, buf, NULL, error) < 0) */\n/*         ret = -1; */\n\n/* out: */\n/*     if (repo) */\n/*         seaf_repo_unref (repo); */\n/*     if (head_commit) */\n/*         seaf_commit_unref(head_commit); */\n/*     string_list_free (blockids); */\n/*     string_list_free (paths); */\n/*     seaf_dirent_free (new_dent); */\n/*     g_free (root_id); */\n/*     g_free (canon_path); */\n\n/*     if (ret == 0) */\n/*         update_repo_size(repo_id); */\n\n/*     
return ret; */\n/* } */\n\nint\nseaf_repo_manager_post_blocks (SeafRepoManager *mgr,\n                               const char *repo_id,\n                               const char *blockids_json,\n                               const char *paths_json,\n                               const char *user,\n                               GError **error)\n{\n    SeafRepo *repo = NULL;\n    GList *blockids = NULL, *paths = NULL, *ptr;\n    int ret = 0;\n\n    blockids = json_to_file_list (blockids_json);\n    paths = json_to_file_list (paths_json);\n    if (g_list_length(blockids) != g_list_length(paths)) {\n        seaf_warning (\"[post-blks] Invalid blockids or paths.\\n\");\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid files\");\n        ret = -1;\n        goto out;\n    }\n\n    for (ptr = paths; ptr; ptr = ptr->next) {\n        char *temp_file_path = ptr->data;\n        if (g_access (temp_file_path, R_OK) != 0) {\n            seaf_warning (\"[post-blks] File block %s doesn't exist or not readable.\\n\",\n                          temp_file_path);\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                         \"Invalid input file\");\n            ret = -1;\n            goto out;\n        }\n    }\n\n    GET_REPO_OR_FAIL(repo, repo_id);\n\n    /* Write blocks. 
*/\n    if (seaf_fs_manager_index_raw_blocks (seaf->fs_mgr,\n                                          repo->store_id,\n                                          repo->version,\n                                          paths,\n                                          blockids) < 0) {\n        seaf_warning (\"Failed to index file blocks.\\n\");\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to index blocks\");\n        ret = -1;\n        goto out;\n    }\n\nout:\n    if (repo)\n        seaf_repo_unref (repo);\n    string_list_free (blockids);\n    string_list_free (paths);\n\n    if (ret == 0)\n        update_repo_size(repo_id);\n\n    return ret;\n}\n\nstatic int\ncheck_quota_before_commit_blocks (const char *store_id,\n                                  int version,\n                                  GList *blockids)\n{\n    GList *ptr;\n    char *blockid;\n    gint64 total_size = 0;\n    BlockMetadata *bmd;\n\n    for (ptr = blockids; ptr; ptr = ptr->next) {\n        blockid = ptr->data;\n        bmd = seaf_block_manager_stat_block (seaf->block_mgr, store_id, version, blockid);\n        if (!bmd) {\n            seaf_warning (\"Failed to stat block %s in store %s.\\n\",\n                          blockid, store_id);\n            return -1;\n        }\n\n        total_size += (gint64)bmd->size;\n        g_free (bmd);\n    }\n\n    return seaf_quota_manager_check_quota_with_delta (seaf->quota_mgr, store_id, total_size);\n}\n\nint\nseaf_repo_manager_commit_file_blocks (SeafRepoManager *mgr,\n                                      const char *repo_id,\n                                      const char *parent_dir,\n                                      const char *file_name,\n                                      const char *blockids_json,\n                                      const char *user,\n                                      gint64 file_size,\n                                      int replace_existed,\n            
                          gint64 mtime,\n                                      char **new_id,\n                                      GError **error)\n{\n    SeafRepo *repo = NULL;\n    SeafCommit *head_commit = NULL;\n    char *canon_path = NULL;\n    unsigned char sha1[20];\n    char buf[SEAF_PATH_MAX];\n    char *root_id = NULL;\n    SeafDirent *new_dent = NULL;\n    GList *blockids = NULL;\n    char hex[41];\n    char *gc_id = NULL;\n    int ret = 0;\n\n    blockids = json_to_file_list (blockids_json);\n\n    GET_REPO_OR_FAIL(repo, repo_id);\n    GET_COMMIT_OR_FAIL(head_commit, repo->id, repo->version, repo->head->commit_id);\n\n    if (!canon_path)\n        canon_path = get_canonical_path (parent_dir);\n\n    if (should_ignore_file (file_name, NULL)) {\n        seaf_warning (\"[post-blks] Invalid filename %s.\\n\", file_name);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid filename\");\n        ret = -1;\n        goto out;\n    }\n\n    if (strstr (parent_dir, \"//\") != NULL) {\n        seaf_warning (\"[post-blks] parent_dir cantains // sequence.\\n\");\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid parent dir\");\n        ret = -1;\n        goto out;\n    }\n\n    int rc = check_quota_before_commit_blocks (repo->store_id, repo->version, blockids);\n    if (rc != 0) {\n        g_set_error (error, SEAFILE_DOMAIN, POST_FILE_ERR_QUOTA_FULL,\n                     \"Quota full\");\n        ret = -1;\n        goto out;\n    }\n\n    gc_id = seaf_repo_get_current_gc_id (repo);\n\n    /* Write blocks. 
*/\n    if (seaf_fs_manager_index_existed_file_blocks (\n            seaf->fs_mgr, repo->store_id, repo->version,\n            blockids, sha1, file_size) < 0) {\n        seaf_warning (\"Failed to index existed file  blocks.\\n\");\n        g_set_error (error, SEAFILE_DOMAIN, POST_FILE_ERR_BLOCK_MISSING,\n                     \"Failed to index file blocks\");\n        ret = -1;\n        goto out;\n    }\n\n    rawdata_to_hex(sha1, hex, 20);\n    if (mtime <= 0) {\n        mtime = (gint64)time(NULL);\n    }\n    new_dent = seaf_dirent_new (dir_version_from_repo_version(repo->version),\n                                hex, STD_FILE_MODE, file_name,\n                                mtime, user, file_size);\n\n    root_id = do_post_file_replace (repo, head_commit->root_id,\n                                    canon_path, replace_existed, new_dent);\n    if (!root_id) {\n        seaf_warning (\"[post-blks] Failed to post file to %s in repo %s.\\n\",\n                      canon_path, repo->id);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to put file\");\n        ret = -1;\n        goto out;\n    }\n\n    *new_id = g_strdup(hex);\n    snprintf(buf, SEAF_PATH_MAX, \"Added \\\"%s\\\"\", file_name);\n    if (gen_new_commit (repo_id, head_commit, root_id,\n                        user, buf, NULL, TRUE, TRUE, gc_id, error) < 0)\n        ret = -1;\n\nout:\n    if (repo)\n        seaf_repo_unref (repo);\n    if (head_commit)\n        seaf_commit_unref(head_commit);\n    string_list_free (blockids);\n    seaf_dirent_free (new_dent);\n    g_free (root_id);\n    g_free (canon_path);\n    g_free (gc_id);\n\n    if (ret == 0)\n        update_repo_size(repo_id);\n\n    return ret;\n}\n\nstatic char *\ndel_file_recursive(SeafRepo *repo,\n                   const char *dir_id,\n                   const char *to_path,\n                   const char *filename,\n                   int *mode, int *p_deleted_num, char **desc_file)\n{\n    
SeafDir *olddir, *newdir;\n    SeafDirent *dent;\n    GList *ptr;\n    char *to_path_dup = NULL;\n    char *remain = NULL;\n    char *slash;\n    char *id = NULL;\n    char *ret = NULL;\n    int deleted_num = 0;\n\n    olddir = seaf_fs_manager_get_seafdir_sorted(seaf->fs_mgr,\n                                                repo->store_id, repo->version,\n                                                dir_id);\n    if (!olddir)\n        return NULL;\n\n    /* we reach the target dir. Remove the given entry from it. */\n    if (*to_path == '\\0') {\n        SeafDirent *old, *new;\n        GList *newentries = NULL, *p;\n        GList *filenames = NULL, *ptr;\n        char *name;\n        int found_flag;\n\n        filenames = json_to_file_list (filename);\n        if (!filenames) {\n            seaf_dir_free(olddir);\n            return NULL;\n        }\n\n        for (p = olddir->entries; p != NULL; p = p->next) {\n            found_flag = 0;\n            old = p->data;\n            for (ptr = filenames; ptr; ptr = ptr->next) {\n                name = ptr->data;\n                if (strcmp(old->name, name) == 0) {\n                    found_flag = 1;\n                    deleted_num++;\n                    if (mode)\n                        *mode = old->mode;\n                    if (desc_file && *desc_file==NULL)\n                        *desc_file = g_strdup(old->name);\n                    break;\n                }\n            }\n            if (!found_flag) {\n                new = seaf_dirent_dup (old);\n                newentries = g_list_prepend (newentries, new);\n            }\n        }\n\n        string_list_free (filenames);\n\n        if (deleted_num == 0) {\n            ret = g_strdup(olddir->dir_id);\n            if (newentries)\n                g_list_free_full (newentries, (GDestroyNotify)seaf_dirent_free);\n            goto out;\n        }\n\n        newentries = g_list_reverse (newentries);\n\n        newdir = seaf_dir_new(NULL, newentries,\n    
                          dir_version_from_repo_version(repo->version));\n        if (seaf_dir_save(seaf->fs_mgr, repo->store_id, repo->version, newdir) == 0)\n            ret = g_strdup(newdir->dir_id);\n        seaf_dir_free(newdir);\n        goto out;\n    }\n\n    to_path_dup = g_strdup (to_path);\n    slash = strchr (to_path_dup, '/');\n\n    if (!slash) {\n        remain = to_path_dup + strlen(to_path_dup);\n    } else {\n        *slash = '\\0';\n        remain = slash + 1;\n    }\n\n    for (ptr = olddir->entries; ptr; ptr = ptr->next) {\n        dent = (SeafDirent *)ptr->data;\n\n        if (strcmp(dent->name, to_path_dup) != 0)\n            continue;\n\n        id = del_file_recursive(repo, dent->id, remain, filename,\n                                mode, &deleted_num, desc_file);\n        if (id != NULL && deleted_num > 0) {\n            memcpy(dent->id, id, 40);\n            dent->id[40] = '\\0';\n            if (repo->version > 0)\n                dent->mtime = (guint64)time(NULL);\n        }\n        break;\n    }\n    if (id != NULL) {\n        if (deleted_num == 0) {\n            ret = g_strdup(olddir->dir_id);\n        } else {\n            /* Create a new SeafDir. 
*/\n            GList *new_entries;\n        \n            new_entries = dup_seafdir_entries (olddir->entries);\n            newdir = seaf_dir_new (NULL, new_entries,\n                                   dir_version_from_repo_version(repo->version));\n            if (seaf_dir_save (seaf->fs_mgr, repo->store_id, repo->version, newdir) == 0)\n                ret = g_strdup (newdir->dir_id);\n            seaf_dir_free (newdir);\n        }\n    }\n\nout:\n    if (p_deleted_num)\n        *p_deleted_num = deleted_num;\n\n    g_free (to_path_dup);\n    g_free (id);\n    seaf_dir_free(olddir);\n    return ret;\n}\n\nstatic char *\ndo_del_file(SeafRepo *repo,\n            const char *root_id,\n            const char *parent_dir,\n            const char *file_name,\n            int *mode, int *deleted_num, char **desc_file)\n{\n    /* if parent_dir is a absolutely path, we will remove the first '/' */\n    if (*parent_dir == '/')\n        parent_dir = parent_dir + 1;\n\n    return del_file_recursive(repo, root_id, parent_dir, file_name,\n                              mode, deleted_num, desc_file);\n}\n\nint\nseaf_repo_manager_del_file (SeafRepoManager *mgr,\n                            const char *repo_id,\n                            const char *parent_dir,\n                            const char *file_name,\n                            const char *user,\n                            GError **error)\n{\n    SeafRepo *repo = NULL;\n    SeafCommit *head_commit = NULL;\n    SeafDir *dir = NULL;\n    char *canon_path = NULL;\n    char buf[SEAF_PATH_MAX];\n    char *root_id = NULL;\n    char *desc_file = NULL;\n    int mode = 0;\n    int ret = 0;\n    int deleted_num = 0;\n\n    GET_REPO_OR_FAIL(repo, repo_id);\n    GET_COMMIT_OR_FAIL(head_commit, repo->id, repo->version, repo->head->commit_id);\n\n    if (!canon_path)\n        canon_path = get_canonical_path (parent_dir);\n\n    dir = seaf_fs_manager_get_seafdir_by_path (seaf->fs_mgr,\n                                             
  repo->store_id, repo->version,\n                                               head_commit->root_id, canon_path, NULL);\n    if (!dir) {\n        seaf_warning (\"parent_dir %s doesn't exist in repo %s.\\n\",\n                      canon_path, repo->store_id);\n        ret = -1;\n        goto out;\n    }\n\n    root_id = do_del_file (repo,\n                           head_commit->root_id, canon_path, file_name, &mode,\n                           &deleted_num, &desc_file);\n    if (!root_id) {\n        seaf_warning (\"[del file] Failed to del file from %s in repo %s.\\n\",\n                      canon_path, repo->id);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to del file\");\n        ret = -1;\n        goto out;\n    }\n    if (deleted_num == 0) {\n        goto out;\n    }\n\n    /* Commit. */\n    if (deleted_num > 1) {\n        snprintf(buf, SEAF_PATH_MAX, \"Deleted \\\"%s\\\" and %d more files\",\n                                      desc_file, deleted_num - 1);\n    } else if (S_ISDIR(mode)) {\n        snprintf(buf, SEAF_PATH_MAX, \"Removed directory \\\"%s\\\"\", desc_file);\n    } else {\n        snprintf(buf, SEAF_PATH_MAX, \"Deleted \\\"%s\\\"\", desc_file);\n    }\n\n    if (gen_new_commit (repo_id, head_commit, root_id,\n                        user, buf, NULL, TRUE, FALSE, NULL, error) < 0) {\n        ret = -1;\n        goto out;\n    }\n\n    seaf_repo_manager_merge_virtual_repo (mgr, repo_id, NULL);\n\nout:\n    if (repo)\n        seaf_repo_unref (repo);\n    if (head_commit)\n        seaf_commit_unref(head_commit);\n    if (dir)\n        seaf_dir_free (dir);\n    g_free (root_id);\n    g_free (canon_path);\n    g_free (desc_file);\n\n    if (ret == 0) {\n        update_repo_size (repo_id);\n    }\n\n    return ret;\n}\n\nvoid\ndo_batch_del_files (ChangeSet *changeset,\n                    const char *file_list,\n                    int *mode, int *deleted_num, char **desc_file)\n{\n    GList 
*filepaths = NULL, *ptr;\n    char *name;\n\n    filepaths = json_to_file_list (file_list);\n\n    for (ptr = filepaths; ptr; ptr = ptr->next) {\n        name = ptr->data;\n        if (!name || g_strcmp0 (name, \"\") == 0) {\n            continue;\n        }\n        char *canon_path = get_canonical_path (name);\n        char *base_name= g_path_get_basename (canon_path);\n        char *del_path = canon_path;\n        if (canon_path[0] == '/') {\n            del_path = canon_path + 1;\n        }\n\n        remove_from_changeset (changeset, del_path, FALSE, NULL, mode);\n\n        (*deleted_num)++;\n        if (desc_file && *desc_file == NULL)\n            *desc_file = g_strdup (base_name);\n\n        g_free (canon_path);\n        g_free (base_name);\n    }\n\n    string_list_free (filepaths);\n}\n\nint\nseaf_repo_manager_batch_del_files (SeafRepoManager *mgr,\n                                   const char *repo_id,\n                                   const char *file_list,\n                                   const char *user,\n                                   GError **error)\n{\n    SeafRepo *repo = NULL;\n    SeafCommit *head_commit = NULL;\n    SeafDir *dir = NULL;\n    char buf[SEAF_PATH_MAX];\n    char *root_id = NULL;\n    char *desc_file = NULL;\n    ChangeSet *changeset = NULL;\n    int mode = 0;\n    int ret = 0;\n    int deleted_num = 0;\n\n    GET_REPO_OR_FAIL(repo, repo_id);\n    GET_COMMIT_OR_FAIL(head_commit, repo->id, repo->version, repo->head->commit_id);\n\n    dir = seaf_fs_manager_get_seafdir_sorted (seaf->fs_mgr,\n                                              repo->store_id, repo->version,\n                                              head_commit->root_id);\n    if (!dir) {\n        seaf_warning (\"root dir doesn't exist in repo %s.\\n\",\n                      repo->store_id);\n        ret = -1;\n        goto out;\n    }\n\n    changeset = changeset_new (repo_id, dir);\n    if (!changeset) {\n        g_set_error (error, SEAFILE_DOMAIN, 
SEAF_ERR_GENERAL,\n                     \"Failed to batch del files\");\n        ret = -1;\n        goto out;\n    }\n\n    do_batch_del_files (changeset, file_list, &mode, &deleted_num, &desc_file);\n\n    if (deleted_num == 0) {\n        goto out;\n    }\n\n    root_id = commit_tree_from_changeset (changeset);\n    if (!root_id) {\n        seaf_warning (\"Failed to commit changeset for repo %s.\\n\", repo_id);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to batch del files\");\n        ret = -1;\n        goto out;\n    }\n\n    /* Commit. */\n    if (deleted_num > 1) {\n        snprintf(buf, SEAF_PATH_MAX, \"Deleted \\\"%s\\\" and %d more files\",\n                                      desc_file, deleted_num - 1);\n    } else if (S_ISDIR(mode)) {\n        snprintf(buf, SEAF_PATH_MAX, \"Removed directory \\\"%s\\\"\", desc_file);\n    } else {\n        snprintf(buf, SEAF_PATH_MAX, \"Deleted \\\"%s\\\"\", desc_file);\n    }\n\n    if (gen_new_commit (repo_id, head_commit, root_id,\n                        user, buf, NULL, TRUE, FALSE, NULL, error) < 0) {\n        ret = -1;\n        goto out;\n    }\n\n    seaf_repo_manager_merge_virtual_repo (mgr, repo_id, NULL);\n\nout:\n    changeset_free (changeset);\n    if (repo)\n        seaf_repo_unref (repo);\n    if (head_commit)\n        seaf_commit_unref(head_commit);\n    if (dir)\n        seaf_dir_free (dir);\n    g_free (root_id);\n    g_free (desc_file);\n\n    if (ret == 0) {\n        update_repo_size (repo_id);\n    }\n\n    return ret;\n}\n\nstatic SeafDirent *\nget_dirent_by_path (SeafRepo *repo,\n                    const char *root_id,\n                    const char *path,\n                    const char *file_name,\n                    GError **error)\n{\n    SeafCommit *head_commit = NULL; \n    SeafDirent *dent = NULL;\n    SeafDir *dir = NULL;\n\n    if (!root_id) {\n        head_commit = seaf_commit_manager_get_commit(seaf->commit_mgr,\n                     
                                repo->id, repo->version, \n                                                     repo->head->commit_id);\n        if (!head_commit) {\n            seaf_warning (\"commit %s:%s doesn't exist.\\n\",\n                          repo->id, repo->head->commit_id);\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid commit\");\n            goto out;\n        }\n        root_id = head_commit->root_id;\n    }\n\n    dir = seaf_fs_manager_get_seafdir_by_path (seaf->fs_mgr,\n                                               repo->store_id, repo->version,\n                                               root_id,\n                                               path, NULL);\n    if (!dir) {\n        seaf_warning (\"dir %s doesn't exist in repo %s.\\n\", path, repo->id);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid dir\");\n        goto out;\n    }\n\n    GList *p;\n    for (p = dir->entries; p; p = p->next) {\n        SeafDirent *d = p->data;\n        int r = strcmp (d->name, file_name);\n        if (r == 0) {\n            dent = seaf_dirent_dup(d);\n            break;\n        }\n    }\n\n    if (!dent && error && !(*error)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"failed to get dirent\");\n    }\n\nout:\n    if (head_commit)\n        seaf_commit_unref (head_commit);\n    if (dir)\n        seaf_dir_free (dir);\n\n    return dent;\n}\n\nstatic int\nput_dirent_and_commit (SeafRepo *repo,\n                       const char *path,\n                       SeafDirent *dents[],\n                       int n_dents,\n                       int replace,\n                       const char *user,\n                       gboolean check_gc,\n                       const char *last_gc_id,\n                       GError **error)\n{\n    SeafCommit *head_commit = NULL;\n    char *root_id = NULL;\n    char buf[SEAF_PATH_MAX];\n    int ret = 0, i = 0;\n\n    
GET_COMMIT_OR_FAIL(head_commit, repo->id, repo->version, repo->head->commit_id);\n    \n    root_id = head_commit->root_id;\n\n    GList *dent_list = NULL;\n    GList *name_list = NULL;\n    for (i = 0; i < n_dents; i++)\n        dent_list = g_list_append (dent_list, dents[i]);\n\n    if (*path == '/')\n        path = path + 1;\n    root_id = post_multi_files_recursive (repo, root_id, path, dent_list, user,\n                                          replace, &name_list);\n    g_list_free (dent_list);\n    g_list_free_full (name_list, (GDestroyNotify)g_free);\n\n    if (!root_id) {\n        if (n_dents > 1)\n            seaf_warning (\"[cp file] Failed to cp %s and other %d files to %s in repo %s.\\n\",\n                          dents[0]->name, n_dents - 1, path, repo->id);\n        else\n            seaf_warning (\"[cp file] Failed to cp %s to %s in repo %s.\\n\",\n                          dents[0]->name, path, repo->id);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                \"Failed to cp file\");\n        ret = -1;\n        goto out;\n    }\n\n    /* Commit. 
*/\n    if (n_dents > 1) {\n        snprintf(buf, sizeof(buf), \"Added \\\"%s\\\" and %d more files\",\n                                   dents[0]->name, n_dents - 1);\n    } else if (S_ISDIR(dents[0]->mode)) {\n        snprintf(buf, sizeof(buf), \"Added directory \\\"%s\\\"\", dents[0]->name);\n    } else {\n        snprintf(buf, sizeof(buf), \"Added \\\"%s\\\"\", dents[0]->name);\n    }\n\n    if (gen_new_commit (repo->id, head_commit, root_id,\n                        user, buf, NULL, TRUE, check_gc, last_gc_id, error) < 0)\n        ret = -1;\n\nout:\n    if (head_commit)\n        seaf_commit_unref (head_commit);\n    if (root_id)\n        g_free (root_id);\n    \n    return ret;\n}\n\nstatic int\nwrite_block (const char *repo_id, const char *block_id, int version, const char *buf, int len)\n{\n    SeafBlockManager *mgr = seaf->block_mgr;\n    BlockHandle *handle;\n    int n;\n\n    /* Don't write if the block already exists. */\n    if (seaf_block_manager_block_exists (mgr,\n                                         repo_id, version,\n                                         block_id)) {\n        return 0;\n    }\n\n    handle = seaf_block_manager_open_block (mgr,\n                                            repo_id, version,\n                                            block_id, BLOCK_WRITE);\n    if (!handle) {\n        seaf_warning (\"Failed to open block %s.\\n\", block_id);\n        return -1;\n    }\n\n    n = seaf_block_manager_write_block (mgr, handle, buf, len);\n    if (n < 0) {\n        seaf_warning (\"Failed to write block %s.\\n\", block_id);\n        seaf_block_manager_close_block (mgr, handle);\n        seaf_block_manager_block_handle_free (mgr, handle);\n        return -1;\n    }\n\n    if (seaf_block_manager_close_block (mgr, handle) < 0) {\n        seaf_warning (\"failed to close block %s.\\n\", block_id);\n        seaf_block_manager_block_handle_free (mgr, handle);\n        return -1;\n    }\n\n    if (seaf_block_manager_commit_block (mgr, 
handle) < 0) {\n        seaf_warning (\"failed to commit block %s.\\n\", block_id);\n        seaf_block_manager_block_handle_free (mgr, handle);\n        return -1;\n    }\n\n    seaf_block_manager_block_handle_free (mgr, handle);\n    return 0;\n}\n\n// return a new block id.\nstatic char *\ncopy_block_between_enc_repo (SeafRepo *src_repo, SeafRepo *dst_repo,\n                             SeafileCrypt *src_crypt, SeafileCrypt *dst_crypt,\n                             const char *block_id)\n{\n    SeafBlockManager *mgr = seaf->block_mgr;\n    char *ret = NULL;\n    BlockHandle *handle = NULL;\n    BlockMetadata *bmd = NULL;\n    char *buf = NULL;\n    char *src_dec_out = NULL;\n    int src_dec_out_len = -1;\n    SHA_CTX ctx;\n    uint8_t  checksum[CHECKSUM_LENGTH];\n    char checksum_str[41];\n    int block_size = 0;\n    int n;\n\n    if (g_strcmp0 (block_id, EMPTY_SHA1) == 0) {\n        ret = g_strdup (block_id);\n        goto out;\n    }\n\n    // Read block from source repo.\n    handle = seaf_block_manager_open_block(mgr,\n                                           src_repo->store_id,\n                                           src_repo->version,\n                                           block_id, BLOCK_READ);\n    if (!handle) {\n        seaf_warning (\"Failed to open block %s.\\n\", block_id);\n        return NULL;\n    }\n\n    bmd = seaf_block_manager_stat_block_by_handle (mgr, handle);\n    if (!bmd) {\n        seaf_warning (\"Failed to stat block %s by handle.\\n\", block_id);\n        goto out;\n    }\n    block_size = bmd->size;\n\n    if (block_size == 0) {\n        ret = g_strdup (block_id);\n        goto out;\n    }\n\n    buf = g_new (char, block_size);\n\n    n = seaf_block_manager_read_block(seaf->block_mgr, handle, buf, block_size);\n    if (n != block_size) {\n        seaf_warning (\"Failed to read block from source repo %s.\\n\", src_repo->id);\n        \n        goto out;\n    }\n\n    if (src_crypt != NULL) {\n        int rc = 
seafile_decrypt (&src_dec_out, &src_dec_out_len,\n                                  buf, block_size, src_crypt);\n        if (rc != 0) {\n            seaf_warning (\"Failed to decrypt block %s.\\n\", block_id);\n            goto out;\n        }\n    }\n\n    // Write block to destination repo.\n    if (src_crypt && dst_crypt) {\n        // Both source and destination repos are encrypted reops.\n        char *dst_enc_buf = NULL;\n        int dst_enc_len = -1;\n        int rc = seafile_encrypt (&dst_enc_buf, &dst_enc_len, \n                                  src_dec_out, src_dec_out_len, dst_crypt);\n        if (rc != 0) {\n            seaf_warning (\"Failed to encrypt block for repo %s.\\n\", dst_repo->id);\n            goto out;\n        }\n\n        SHA1_Init (&ctx);\n        SHA1_Update (&ctx, dst_enc_buf, dst_enc_len);\n        SHA1_Final (checksum, &ctx);\n        rawdata_to_hex (checksum, checksum_str, 20);\n        if (write_block (dst_repo->store_id, checksum_str, dst_repo->version, dst_enc_buf, dst_enc_len) < 0) {\n            g_free (dst_enc_buf);\n            goto out;\n        }\n        g_free (dst_enc_buf);\n        ret = g_strdup (checksum_str);\n    } else if (src_crypt && !dst_crypt) {\n        // Source repo is encrypted.\n        SHA1_Init (&ctx);\n        SHA1_Update (&ctx, src_dec_out, src_dec_out_len);\n        SHA1_Final (checksum, &ctx);\n        rawdata_to_hex (checksum, checksum_str, 20);\n        if (write_block (dst_repo->store_id, checksum_str, dst_repo->version, src_dec_out, src_dec_out_len) < 0) {\n            goto out;\n        }\n        ret = g_strdup (checksum_str);\n    } else if (!src_crypt && dst_crypt) {\n        // Destination repo is encrypted.\n        char *dst_enc_buf = NULL;\n        int dst_enc_len = -1;\n        int rc = seafile_encrypt (&dst_enc_buf, &dst_enc_len, \n                                  buf, block_size, dst_crypt);\n        if (rc != 0) {\n            seaf_warning (\"Failed to encrypt block for repo 
%s.\\n\", dst_repo->id);\n            goto out;\n        }\n\n        SHA1_Init (&ctx);\n        SHA1_Update (&ctx, dst_enc_buf, dst_enc_len);\n        SHA1_Final (checksum, &ctx);\n        rawdata_to_hex (checksum, checksum_str, 20);\n        if (write_block (dst_repo->store_id, checksum_str, dst_repo->version, dst_enc_buf, dst_enc_len) < 0) {\n            g_free (dst_enc_buf);\n            goto out;\n        }\n        g_free (dst_enc_buf);\n        ret = g_strdup (checksum_str);\n    } else if (!src_crypt && !dst_crypt) {\n        // Both source and destination repos are not encrypted reops.\n        if (write_block (dst_repo->store_id, block_id, dst_repo->version, buf, block_size) < 0) {\n            goto out;\n        }\n        ret = g_strdup (checksum_str);\n    }\n\nout:\n    g_free (buf);\n    g_free (src_dec_out);\n    if (handle) {\n        seaf_block_manager_close_block (mgr, handle);\n        seaf_block_manager_block_handle_free (mgr, handle);\n\n    }\n    if (bmd)\n        g_free (bmd);\n\n    return ret;\n}\n\nstatic char *\ncopy_seafile (SeafRepo *src_repo, SeafRepo *dst_repo,\n              SeafileCrypt *src_crypt, SeafileCrypt *dst_crypt,\n              const char *file_id, CopyTask *task, guint64 *size)\n{\n    Seafile *file;\n\n    file = seaf_fs_manager_get_seafile (seaf->fs_mgr,\n                                        src_repo->store_id, src_repo->version,\n                                        file_id);\n    if (!file) {\n        seaf_warning (\"Failed to get file object %s from repo %s.\\n\",\n                      file_id, src_repo->id);\n        return NULL;\n    }\n\n    /* We may be copying from v0 repo to v1 repo or vise versa. */\n    file->version = seafile_version_from_repo_version(dst_repo->version);\n\n    int i;\n    char *block_id;\n    for (i = 0; i < file->n_blocks; ++i) {\n        /* Check cancel before copying a block. 
*/\n        if (task && g_atomic_int_get (&task->canceled)) {\n            seafile_unref (file);\n            return NULL;\n        }\n        block_id = file->blk_sha1s[i];\n        if (src_crypt != NULL || dst_crypt != NULL) {\n            char *new_block_id = copy_block_between_enc_repo (src_repo, dst_repo, src_crypt, dst_crypt, block_id);\n            if (new_block_id == NULL) {\n                seaf_warning (\"Failed to copy block %s from repo %s to %s.\\n\",\n                              block_id, src_repo->id, dst_repo->id);\n                seafile_unref (file);\n                return NULL;\n            }\n            g_free (file->blk_sha1s[i]);\n            file->blk_sha1s[i] = new_block_id;\n        } else {\n            if (seaf_block_manager_copy_block (seaf->block_mgr,\n                                               src_repo->store_id, src_repo->version,\n                                               dst_repo->store_id, dst_repo->version,\n                                               block_id) < 0) {\n                seaf_warning (\"Failed to copy block %s from repo %s to %s.\\n\",\n                              block_id, src_repo->id, dst_repo->id);\n                seafile_unref (file);\n                return NULL;\n            }\n        }\n    }\n\n    // Save fs after copy blocks, block_id may be changed when copy between encrypted repos.\n    if (seafile_save (seaf->fs_mgr,\n                      dst_repo->store_id,\n                      dst_repo->version,\n                      file) < 0) {\n        seaf_warning (\"Failed to copy file object %s from repo %s to %s.\\n\",\n                      file_id, src_repo->id, dst_repo->id);\n        seafile_unref (file);\n        return NULL;\n    }\n\n    if (task)\n        ++(task->done);\n\n    *size = file->file_size;\n    char *ret = g_strdup(file->file_id);\n\n    seafile_unref (file);\n    return ret;\n}\n\nstatic char *\ncopy_recursive (SeafRepo *src_repo, SeafRepo *dst_repo,\n              
  SeafileCrypt *src_crypt, SeafileCrypt *dst_crypt,\n                const char *obj_id, guint32 mode, const char *modifier,\n                CopyTask *task, guint64 *size)\n{\n    if (S_ISREG(mode)) {\n        return copy_seafile (src_repo, dst_repo, src_crypt, dst_crypt, obj_id, task, size);\n    } else if (S_ISDIR(mode)) {\n        SeafDir *src_dir = NULL, *dst_dir = NULL;\n        GList *dst_ents = NULL, *ptr;\n        char *new_id = NULL;\n        SeafDirent *dent, *new_dent = NULL;\n\n        src_dir = seaf_fs_manager_get_seafdir (seaf->fs_mgr,\n                                               src_repo->store_id,\n                                               src_repo->version,\n                                               obj_id);\n        if (!src_dir) {\n            seaf_warning (\"Seafdir %s doesn't exist in repo %s.\\n\",\n                          obj_id, src_repo->id);\n            return NULL;\n        }\n\n        for (ptr = src_dir->entries; ptr; ptr = ptr->next) {\n            dent = ptr->data;\n\n            guint64 new_size = 0;\n            new_id = copy_recursive (src_repo, dst_repo, src_crypt, dst_crypt,\n                                     dent->id, dent->mode, modifier, task, &new_size);\n            if (!new_id) {\n                seaf_dir_free (src_dir);\n                return NULL;\n            }\n\n            new_dent = seaf_dirent_new (dir_version_from_repo_version(dst_repo->version),\n                                        new_id, dent->mode, dent->name,\n                                        dent->mtime, modifier, new_size);\n            dst_ents = g_list_prepend (dst_ents, new_dent);\n            g_free (new_id);\n        }\n        dst_ents = g_list_reverse (dst_ents);\n\n        seaf_dir_free (src_dir);\n\n        dst_dir = seaf_dir_new (NULL, dst_ents,\n                                dir_version_from_repo_version(dst_repo->version));\n        if (seaf_dir_save (seaf->fs_mgr,\n                           dst_repo->store_id, 
dst_repo->version,\n                           dst_dir) < 0) {\n            seaf_warning (\"Failed to save new dir.\\n\");\n            seaf_dir_free (dst_dir);\n            return NULL;\n        }\n\n        char *ret = g_strdup(dst_dir->dir_id);\n        *size = 0;\n        seaf_dir_free (dst_dir);\n        return ret;\n    }\n\n    return NULL;\n}\n\nstatic GHashTable *\nget_sub_dirents_hash_map(SeafRepo *repo, const char *parent_dir)\n{\n    GError *error;\n    GList *p;\n    SeafDir *dir = seaf_fs_manager_get_seafdir_by_path (seaf->fs_mgr, repo->store_id,\n                                               repo->version, repo->root_id, parent_dir, &error);\n    if (!dir) {\n        if (error) {\n            seaf_warning (\"Failed to get dir %s repo %.8s: %s.\\n\",\n                          parent_dir, repo->store_id, error->message);\n            g_clear_error(&error);\n        } else {\n            seaf_warning (\"dir %s doesn't exist in repo %.8s.\\n\",\n                          parent_dir, repo->store_id);\n        }\n        return NULL;\n    }\n\n    GHashTable *dirent_hash = g_hash_table_new_full(g_str_hash,\n                                                    g_str_equal,\n                                                    g_free,\n                                                    (GDestroyNotify)seaf_dirent_free);\n    for (p = dir->entries; p; p = p->next) {\n        SeafDirent *d = p->data;\n        g_hash_table_insert(dirent_hash, g_strdup(d->name), d);\n    }\n\n    g_list_free (dir->entries);\n    g_free (dir->ondisk);\n    g_free(dir);\n\n    return dirent_hash;\n}\n\nstatic void\nset_failed_reason (char **failed_reason, char *err_str)\n{\n    *failed_reason = g_strdup (err_str);\n}\n\nstatic SeafileCrypt *\nget_crypt_by_repo (SeafRepo *repo, const char *user)\n{\n    char *key_hex, *iv_hex;\n    unsigned char enc_key[32], enc_iv[16];\n    SeafileCryptKey *key = NULL;\n    SeafileCrypt *crypt = NULL;\n\n    key = 
seaf_passwd_manager_get_decrypt_key (seaf->passwd_mgr,\n                                               repo->id, user);\n    if (!key) {\n        return NULL;\n    }\n\n    g_object_get (key,\n                  \"key\", &key_hex,\n                  \"iv\", &iv_hex,\n                  NULL);\n    if (repo->enc_version == 1)\n        hex_to_rawdata (key_hex, enc_key, 16);\n    else\n        hex_to_rawdata (key_hex, enc_key, 32);\n    hex_to_rawdata (iv_hex, enc_iv, 16);\n    crypt = seafile_crypt_new (repo->enc_version, enc_key, enc_iv);\n    g_free (key_hex);\n    g_free (iv_hex);\n\n    g_object_unref (key);\n    return crypt;\n}\n\nstatic int\ncross_repo_copy (const char *src_repo_id,\n                 const char *src_path,\n                 const char *src_filename,\n                 const char *dst_repo_id,\n                 const char *dst_path,\n                 const char *dst_filename,\n                 int replace,\n                 const char *modifier,\n                 CopyTask *task)\n{\n    SeafRepo *src_repo = NULL, *dst_repo = NULL;\n    SeafDirent **src_dents = NULL, **dst_dents = NULL;\n    GList *src_names = NULL, *dst_names = NULL, *ptr;\n    char *name;\n    char *new_id = NULL;\n    guint64 new_size = 0;\n    int ret = 0, i = 0;\n    int file_num = 0;\n    GHashTable *dirent_hash = NULL;\n    gint64 total_size_all = 0;\n    char *err_str = COPY_ERR_INTERNAL;\n    int check_quota_ret;\n    SeafileCrypt *src_crypt = NULL;\n    SeafileCrypt *dst_crypt = NULL;\n    char *gc_id = NULL;\n\n    src_repo = seaf_repo_manager_get_repo (seaf->repo_mgr, src_repo_id);\n    if (!src_repo) {\n        err_str = COPY_ERR_INTERNAL;\n        ret = -1;\n        seaf_warning (\"Failed to get source repo.\\n\");\n        goto out;\n    }\n\n    if (src_repo->encrypted) {\n        src_crypt = get_crypt_by_repo (src_repo, modifier);\n        if (!src_crypt) {\n            err_str = COPY_ERR_INTERNAL;\n            ret = -1;\n            seaf_warning (\"The source repo 
is encrypted. Please provide password to view it.\\n\");\n            goto out;\n        }\n    }\n\n    dst_repo = seaf_repo_manager_get_repo (seaf->repo_mgr, dst_repo_id);\n    if (!dst_repo) {\n        err_str = COPY_ERR_INTERNAL;\n        ret = -1;\n        seaf_warning (\"Failed to get destination repo.\\n\");\n        goto out;\n    }\n\n    if (dst_repo->encrypted) {\n        dst_crypt = get_crypt_by_repo (dst_repo, modifier);\n        if (!dst_crypt) {\n            err_str = COPY_ERR_INTERNAL;\n            ret = -1;\n            seaf_warning (\"The destination repo is encrypted. Please provide password to view it.\\n\");\n            goto out;\n        }\n    }\n\n    src_names = json_to_file_list (src_filename);\n    dst_names = json_to_file_list (dst_filename);\n    file_num = g_list_length (src_names);\n\n    gc_id = seaf_repo_get_current_gc_id (dst_repo);\n\n    src_dents = g_new0 (SeafDirent *, file_num);\n    dst_dents = g_new0 (SeafDirent *, file_num);\n\n    dirent_hash = get_sub_dirents_hash_map (src_repo, src_path);\n    if (!dirent_hash) {\n        err_str = COPY_ERR_INTERNAL;\n        ret = -1;\n        goto out;\n    }\n\n    gint64 total_files = -1;\n    gint64 total_files_all = 0;\n    /* check filename, size and file count */\n    for (ptr = src_names; ptr; ptr = ptr->next) {\n        name = ptr->data;\n        if (strcmp(name, \"\") == 0) { \n            err_str = COPY_ERR_BAD_ARG;\n            ret = -1;\n            seaf_warning (\"[copy files] Bad args: Empty src_filename.\\n\");\n            goto out; \n        }\n        src_dents[i] = g_hash_table_lookup (dirent_hash, name);\n        if (!src_dents[i]) {\n            err_str = COPY_ERR_INTERNAL;\n            ret = -1;\n            seaf_warning (\"[copy files] File %s not Found.\\n\", name);\n            goto out; \n        }\n        if (S_ISDIR(src_dents[i]->mode))\n            total_files = seaf_fs_manager_count_fs_files (seaf->fs_mgr,\n                                                
          src_repo->store_id,\n                                                          src_repo->version,\n                                                          src_dents[i]->id);\n        else\n            total_files = 1;\n        if (total_files < 0) {\n            err_str = COPY_ERR_INTERNAL;\n            seaf_warning (\"Failed to get file count.\\n\");\n            ret = -1;\n            goto out;\n        }\n        total_files_all += total_files;\n        if (!check_file_count_and_size (src_repo, src_dents[i], total_files_all,\n                                        &total_size_all, &err_str)) {\n            ret = -1;\n            goto out;\n        }\n        i++;\n    }\n\n    check_quota_ret = seaf_quota_manager_check_quota_with_delta (seaf->quota_mgr, dst_repo_id, total_size_all);\n    if (check_quota_ret != 0) {\n        if (check_quota_ret == -1) {\n           err_str = COPY_ERR_INTERNAL;\n           seaf_warning (\"Failed to check quota.\\n\");\n        } else {\n           err_str = COPY_ERR_QUOTA_IS_FULL;\n        }\n        ret = -1;\n        goto out;\n    }\n\n    if (task)\n        task->total = total_files_all;\n\n    i = 0;\n    /* do copy */\n    for (ptr = dst_names; ptr; ptr = ptr->next) {\n        name = ptr->data;\n        new_id = copy_recursive (src_repo, dst_repo, src_crypt, dst_crypt,\n                                 src_dents[i]->id, src_dents[i]->mode, modifier, task,\n                                 &new_size);\n        if (!new_id) {\n            err_str = COPY_ERR_INTERNAL;\n            ret = -1;\n            seaf_warning (\"[copy files] Failed to copy file %s.\\n\", src_dents[i]->name);\n            goto out;\n        }\n        dst_dents[i] = seaf_dirent_new (dir_version_from_repo_version(dst_repo->version),\n                                        new_id, src_dents[i]->mode, name,\n                                        src_dents[i]->mtime, modifier, new_size);\n        g_free (new_id);\n        i++;\n    }\n\n    if 
(put_dirent_and_commit (dst_repo,\n                               dst_path,\n                               dst_dents,\n                               file_num,\n                               replace,\n                               modifier,\n                               TRUE,\n                               gc_id,\n                               NULL) < 0) {\n        err_str = COPY_ERR_INTERNAL;\n        ret = -1;\n        goto out;\n    }\n\n    if (task)\n        task->successful = TRUE;\n\n    seaf_repo_manager_merge_virtual_repo (seaf->repo_mgr, dst_repo_id, NULL);\n\nout:\n    if (src_repo)\n        seaf_repo_unref (src_repo);\n    if (dst_repo)\n        seaf_repo_unref (dst_repo);\n    g_free (src_crypt);\n    g_free (dst_crypt);\n    if (dirent_hash)\n         g_hash_table_unref(dirent_hash);\n    g_free(src_dents);\n    for (i = 0; i < file_num; i++)\n        seaf_dirent_free (dst_dents[i]);\n    g_free (dst_dents);\n    if (src_names)\n        string_list_free (src_names);\n    if (dst_names)\n        string_list_free (dst_names);\n    g_free (gc_id);\n\n    if (ret == 0) {\n        update_repo_size (dst_repo_id);\n    } else {\n        if (task && !task->canceled) {\n            task->failed = TRUE;\n            set_failed_reason (&(task->failed_reason), err_str);\n        }\n    }\n\n    return ret;\n}\n\nstatic gboolean\nis_virtual_repo_and_origin (SeafRepo *repo1, SeafRepo *repo2)\n{\n    if (repo1->virtual_info &&\n        strcmp (repo1->virtual_info->origin_repo_id, repo2->id) == 0)\n        return TRUE;\n    if (repo2->virtual_info &&\n        strcmp (repo2->virtual_info->origin_repo_id, repo1->id) == 0)\n        return TRUE;\n    return FALSE;\n}\n\nstatic gboolean\ncheck_file_count_and_size (SeafRepo *repo, SeafDirent *dent, gint64 total_files,\n                           gint64 *total_size_all, char **err_str)\n{\n    gint64 total_file_size = 0;\n    gint64 size = -1;\n\n    if (seaf->copy_mgr->max_files > 0 &&\n        total_files > 
seaf->copy_mgr->max_files) {\n        *err_str = COPY_ERR_TOO_MANY_FILES;\n        seaf_warning(\"Failed to copy/move file from repo %.8s: Too many files\\n\", repo->id);\n        return FALSE;\n    }\n\n    if (S_ISREG(dent->mode)) {\n        if (repo->version > 0)\n            size = dent->size;\n        else\n            size = seaf_fs_manager_get_file_size (seaf->fs_mgr,\n                                                  repo->store_id,\n                                                  repo->version,\n                                                  dent->id);\n    } else {\n        size = seaf_fs_manager_get_fs_size (seaf->fs_mgr,\n                                            repo->store_id,\n                                            repo->version,\n                                            dent->id);\n    }\n\n    if (size < 0) {\n        *err_str = COPY_ERR_INTERNAL;\n        seaf_warning (\"Failed to get dir size of %s:%s.\\n\",\n                      repo->store_id, dent->id);\n        return FALSE;\n    }\n\n    if (total_size_all) {\n        *total_size_all += size;\n        total_file_size = *total_size_all;\n    }\n\n    if (seaf->copy_mgr->max_size > 0) {\n        if (total_file_size > seaf->copy_mgr->max_size) {\n            *err_str = COPY_ERR_SIZE_TOO_LARGE;\n            seaf_warning(\"Failed to copy/move file from repo %.8s: \"\n                         \"Folder or file size is too large.\\n\", repo->id);\n            return FALSE;\n        }\n    }\n\n    return TRUE;\n}\n\n/**\n * Copy a SeafDirent from a SeafDir to another.\n * \n * 1. When @src_repo and @dst_repo are not the same repo, neither of them\n *    should be encrypted.\n * \n * 2. 
the file being copied must not exist in the dst path of the dst repo.\n */\nSeafileCopyResult *\nseaf_repo_manager_copy_file (SeafRepoManager *mgr,\n                             const char *src_repo_id,\n                             const char *src_path,\n                             const char *src_filename,\n                             const char *dst_repo_id,\n                             const char *dst_path,\n                             const char *dst_filename,\n                             const char *user,\n                             int need_progress,\n                             int synchronous,\n                             GError **error)\n{\n    SeafRepo *src_repo = NULL, *dst_repo = NULL;\n    SeafDirent *src_dent = NULL, *dst_dent = NULL;\n    char *src_canon_path = NULL, *dst_canon_path = NULL;\n    SeafCommit *dst_head_commit = NULL;\n    int ret = 0;\n    gboolean background = FALSE;\n    char *task_id = NULL;\n    SeafileCopyResult *res= NULL;\n\n    GET_REPO_OR_FAIL(src_repo, src_repo_id);\n\n    if (strcmp(src_repo_id, dst_repo_id) != 0) {\n        GET_REPO_OR_FAIL(dst_repo, dst_repo_id);\n\n        if (src_repo->encrypted || dst_repo->encrypted) {\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                         \"Can't copy files between encrypted repo(s)\");\n            ret = -1;\n            goto out;\n        }\n        \n    } else {\n        seaf_repo_ref (src_repo);\n        dst_repo = src_repo;\n    }\n    \n    src_canon_path = get_canonical_path (src_path);\n    dst_canon_path = get_canonical_path (dst_path);\n\n    GET_COMMIT_OR_FAIL(dst_head_commit,\n                       dst_repo->id, dst_repo->version, \n                       dst_repo->head->commit_id);\n    \n    /* FAIL_IF_FILE_EXISTS(dst_repo->store_id, dst_repo->version,\n                        dst_head_commit->root_id, dst_canon_path, dst_filename, NULL); */\n\n    if (strcmp (src_repo_id, dst_repo_id) == 0 ||\n        
is_virtual_repo_and_origin (src_repo, dst_repo)) {\n\n        /* get src dirent */\n        src_dent = get_dirent_by_path (src_repo, NULL,\n                                       src_canon_path, src_filename, error);\n        if (!src_dent) {\n            seaf_warning(\"[copy file] file %s/%s doesn't exist.\\n\", src_canon_path, src_filename);\n            ret = -1;\n            goto out;\n        }\n\n        gint64 file_size = (src_dent->version > 0) ? src_dent->size : -1;\n\n        /* duplicate src dirent with new name */\n        dst_dent = seaf_dirent_new (dir_version_from_repo_version(dst_repo->version),\n                                    src_dent->id, src_dent->mode, dst_filename,\n                                    src_dent->mtime, user, file_size);\n\n        if (put_dirent_and_commit (dst_repo,\n                                   dst_canon_path,\n                                   &dst_dent,\n                                   1,\n                                   0,\n                                   user,\n                                   FALSE,\n                                   NULL,\n                                   error) < 0) {\n            if (!error)\n                g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                             \"failed to put dirent\");\n            ret = -1;\n            goto out;\n        }\n\n        seaf_repo_manager_merge_virtual_repo (mgr, dst_repo_id, NULL);\n\n        update_repo_size (dst_repo_id);\n    } else if (!synchronous) {\n        background = TRUE;\n        task_id = seaf_copy_manager_add_task (seaf->copy_mgr,\n                                              src_repo_id,\n                                              src_canon_path,\n                                              src_filename,\n                                              dst_repo_id,\n                                              dst_canon_path,\n                                              dst_filename,\n       
                                       0,\n                                              user,\n                                              cross_repo_copy,\n                                              need_progress);\n        if (need_progress && !task_id) {\n            seaf_warning (\"Failed to start copy task.\\n\");\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                         \"failed to start copy task\");\n            ret = -1;\n            goto out;\n        }\n    } else {\n        /* Synchronous for cross-repo copy */\n        if (cross_repo_copy (src_repo_id,\n                             src_canon_path,\n                             src_filename,\n                             dst_repo_id,\n                             dst_canon_path,\n                             dst_filename,\n                             0,\n                             user,\n                             NULL) < 0) {\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                         \"Failed to move\");\n            ret = -1;\n            goto out;\n        }\n    }\n\nout:\n    if (src_repo)\n        seaf_repo_unref (src_repo);\n    if (dst_repo)\n        seaf_repo_unref (dst_repo);\n    if (dst_head_commit)\n        seaf_commit_unref(dst_head_commit);\n    if (src_canon_path)\n        g_free (src_canon_path);\n    if (dst_canon_path)\n        g_free (dst_canon_path);\n    if (src_dent)\n        seaf_dirent_free(src_dent);\n    if (dst_dent)\n        seaf_dirent_free(dst_dent);\n\n    if (ret == 0) {\n        res = seafile_copy_result_new ();\n        g_object_set (res, \"background\", background, \"task_id\", task_id, NULL);\n        g_free (task_id);\n    }\n\n    return res;\n}\n\nstatic gboolean\ncheck_move (SeafRepo *src_repo, SeafRepo *dst_repo,\n            const char *src_path, const char *dst_path,\n            GList *src_names);\n\nSeafileCopyResult *\nseaf_repo_manager_copy_multiple_files (SeafRepoManager 
*mgr,\n                                       const char *src_repo_id,\n                                       const char *src_path,\n                                       const char *src_filenames,\n                                       const char *dst_repo_id,\n                                       const char *dst_path,\n                                       const char *dst_filenames,\n                                       const char *user,\n                                       int need_progress,\n                                       int synchronous,\n                                       GError **error)\n{\n    SeafRepo *src_repo = NULL, *dst_repo = NULL;\n    SeafDirent **src_dents = NULL, **dst_dents = NULL;\n    char *src_canon_path = NULL, *dst_canon_path = NULL;\n    SeafCommit *dst_head_commit = NULL;\n    int i = 0, ret = 0; \n    int file_num = 1; \n    gint64 *file_sizes = NULL;\n    gboolean background = FALSE;\n    char *task_id = NULL;\n    char *name;\n    GList *src_names = NULL, *dst_names = NULL, *ptr;\n    SeafileCopyResult *res = NULL;\n    GHashTable *dirent_hash = NULL;\n\n    GET_REPO_OR_FAIL(src_repo, src_repo_id);\n    \n    if (strcmp(src_repo_id, dst_repo_id) != 0) { \n        GET_REPO_OR_FAIL(dst_repo, dst_repo_id);\n    } else {\n        seaf_repo_ref (src_repo);\n        dst_repo = src_repo;\n    }\n\n    src_canon_path = get_canonical_path (src_path);\n    dst_canon_path = get_canonical_path (dst_path);\n\n    GET_COMMIT_OR_FAIL(dst_head_commit,\n                       dst_repo->id, dst_repo->version,\n                       dst_repo->head->commit_id);\n    /*FAIL_IF_FILE_EXISTS(dst_repo->store_id, dst_repo->version,\n                        dst_head_commit->root_id, dst_canon_path, dst_filename, NULL);*/\n\n    src_names = json_to_file_list (src_filenames);\n    dst_names = json_to_file_list (dst_filenames);\n    if (!src_names || !dst_names) {\n        ret = -1;\n        g_set_error (error, SEAFILE_DOMAIN, 
SEAF_ERR_BAD_ARGS, \"Load filenames to json failed\");\n        goto out;\n    }\n    file_num = g_list_length (src_names);\n    int dst_file_num = g_list_length (dst_names);\n    if (dst_file_num != file_num) {\n        ret = -1;\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"The number of files in the parameters does not match\");\n        goto out;\n    }\n\n    /* copy file within the same repo */\n    if (src_repo == dst_repo ||\n        is_virtual_repo_and_origin (src_repo, dst_repo)) {\n\n        if (!check_move (src_repo, dst_repo, src_path, dst_path, src_names)) {\n            seaf_warning (\"Can not copy directory to its subdirectory\");\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                         \"Can not copy directory to its subdirectory\");\n            ret = -1;\n            goto out;\n        }\n\n        /* get src dirents */\n        src_dents = g_new0 (SeafDirent *, file_num);\n        file_sizes = g_new0 (gint64, file_num);\n\n        dirent_hash = get_sub_dirents_hash_map (src_repo, src_path);\n        if (!dirent_hash) {\n            ret = -1;\n            goto out;\n        }\n\n        for (ptr = src_names; ptr; ptr = ptr->next) {\n            name = ptr->data;\n            if (strcmp(name, \"\") == 0) {\n                ret = -1;\n                g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Empty src_filenames\");\n                goto out;\n            }\n            src_dents[i] = g_hash_table_lookup(dirent_hash, name);\n            if (!src_dents[i]) {\n                ret = -1;\n                seaf_warning (\"[copy files] File %s not Found.\\n\", name);\n                goto out;\n            }\n            file_sizes[i] = (src_dents[i]->version > 0) ? 
src_dents[i]->size : -1;\n            i++;\n        }\n\n        dst_dents = g_new0 (SeafDirent *, file_num);\n        i = 0;\n        for (ptr = dst_names; ptr; ptr = ptr->next) {\n            name = ptr->data;\n            if (strcmp(name, \"\") == 0) {\n                ret = -1;\n                g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Empty dst_filenames\");\n                goto out;\n            }\n            /* duplicate src dirents with new names */\n            dst_dents[i] = seaf_dirent_new (dir_version_from_repo_version (dst_repo->version),\n                                            src_dents[i]->id, src_dents[i]->mode, name,\n                                            src_dents[i]->mtime, user, file_sizes[i]);\n            i++;\n        }\n        if (put_dirent_and_commit (dst_repo,\n                                   dst_canon_path,\n                                   dst_dents,\n                                   file_num,\n                                   0,\n                                   user,\n                                   FALSE,\n                                   NULL,\n                                   error) < 0) {\n            if (!error)\n                g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                             \"failed to put dirents\");\n            ret = -1;\n            goto out;\n        }\n\n        seaf_repo_manager_merge_virtual_repo (mgr, src_repo_id, NULL);\n\n        update_repo_size (dst_repo_id);\n    } else {\n        /* copy between different repos */\n        if (!synchronous) {\n            background = TRUE;\n\n            task_id = seaf_copy_manager_add_task (seaf->copy_mgr,\n                                                  src_repo_id,\n                                                  src_canon_path,\n                                                  src_filenames,\n                                                  dst_repo_id,\n                                  
                dst_canon_path,\n                                                  dst_filenames,\n                                                  0,\n                                                  user,\n                                                  cross_repo_copy,\n                                                  need_progress);\n            if (need_progress && !task_id) {\n                seaf_warning (\"Failed to start copy task.\\n\");\n                g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                        \"failed to start copy task\");\n                ret = -1;\n                goto out; \n            }\n        } else {\n            /* Synchronous for cross-repo copy */\n            if (cross_repo_copy (src_repo_id,\n                                 src_canon_path,\n                                 src_filenames,\n                                 dst_repo_id,\n                                 dst_canon_path,\n                                 dst_filenames,\n                                 0,\n                                 user,\n                                 NULL) < 0) { \n                g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                             \"Failed to move\");\n                ret = -1;\n                goto out; \n            }\n        } // Synchronous copy\n    } //else diffrent repo\n\nout:\n    if (src_repo) seaf_repo_unref (src_repo);\n    if (dst_repo) seaf_repo_unref (dst_repo);\n\n    if (dst_head_commit) seaf_commit_unref(dst_head_commit);\n    \n    if (src_canon_path) g_free (src_canon_path);\n    if (dst_canon_path) g_free (dst_canon_path);\n\n    if (src_names)\n        string_list_free (src_names);\n    if (dst_names)\n        string_list_free (dst_names);\n    if (file_sizes)\n        g_free (file_sizes);\n    if (src_dents)\n        g_free (src_dents);\n    if (dst_dents) {\n        for (i = 0; i < file_num; i++)\n            seaf_dirent_free (dst_dents[i]);\n       
 g_free (dst_dents);\n    }\n    if (dirent_hash)\n        g_hash_table_unref(dirent_hash);\n    if (ret == 0) { \n        res = seafile_copy_result_new ();\n        g_object_set (res, \"background\", background, \"task_id\", task_id, NULL);\n        g_free (task_id);\n    }    \n\n    return res;\n}\n\nstatic int\nmove_file_same_repo (const char *repo_id,\n                     const char *src_filenames,\n                     const char *src_path, SeafDirent *src_dents[],\n                     const char *dst_path, SeafDirent *dst_dents[],\n                     int file_num,\n                     int replace,\n                     const char *user,\n                     GError **error)\n{\n    SeafRepo *repo = NULL;\n    SeafCommit *head_commit = NULL;\n    char *root_id_after_put = NULL, *root_id = NULL;\n    char buf[SEAF_PATH_MAX];\n    int ret = 0, i = 0;\n\n    GET_REPO_OR_FAIL(repo, repo_id);\n    GET_COMMIT_OR_FAIL(head_commit, repo->id, repo->version, repo->head->commit_id);\n\n    root_id_after_put = head_commit->root_id;\n\n    GList *dent_list = NULL;\n    GList *name_list = NULL;\n    for (i = 0; i < file_num; i++) {\n        dent_list = g_list_append (dent_list, dst_dents[i]);\n    }    \n    if (*dst_path == '/') \n        dst_path = dst_path + 1; \n\n    root_id_after_put = post_multi_files_recursive (repo, head_commit->root_id, dst_path, dent_list, user,\n                                                    replace, &name_list);\n    g_list_free (dent_list);\n    g_list_free_full (name_list, (GDestroyNotify)g_free);\n\n    if (!root_id_after_put) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"move file failed\");\n        ret = -1;\n        goto out;\n    }\n    root_id = do_del_file (repo, root_id_after_put, src_path, src_filenames,\n                           NULL, NULL, NULL);\n\n    if (!root_id) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"move file failed\");\n        ret = -1;\n        goto out;\n    
}\n\n    /* Commit. */\n    if (file_num > 1) {\n        snprintf(buf, SEAF_PATH_MAX, \"Moved \\\"%s\\\" and %d more files\",\n                                      src_dents[0]->name,file_num - 1);\n    } else if (S_ISDIR(src_dents[0]->mode)) {\n        snprintf(buf, SEAF_PATH_MAX, \"Moved directory \\\"%s\\\"\", src_dents[0]->name);\n    } else {\n        snprintf(buf, SEAF_PATH_MAX, \"Moved \\\"%s\\\"\", src_dents[0]->name);\n    }\n\n    if (gen_new_commit (repo_id, head_commit, root_id,\n                        user, buf, NULL, TRUE, FALSE, NULL, error) < 0)\n        ret = -1;\n    \nout:\n    if (repo)\n        seaf_repo_unref (repo);\n    if (head_commit)\n        seaf_commit_unref (head_commit);\n        \n    g_free (root_id_after_put);\n    g_free (root_id);\n    \n    return ret;\n}\n\nstatic int\ncross_repo_move (const char *src_repo_id,\n                 const char *src_path,\n                 const char *src_filename,\n                 const char *dst_repo_id,\n                 const char *dst_path,\n                 const char *dst_filename,\n                 int replace,\n                 const char *modifier,\n                 CopyTask *task)\n{\n    SeafRepo *src_repo = NULL, *dst_repo = NULL;\n    SeafDirent **src_dents = NULL, **dst_dents = NULL;\n    GList *src_names = NULL, *dst_names = NULL, *ptr;\n    char *name;\n    char *new_id = NULL;\n    guint64 new_size = 0;\n    int ret = 0, i = 0;\n    int file_num = 0;\n    GHashTable *dirent_hash = NULL;\n    gint64 total_size_all = 0;\n    char *err_str = COPY_ERR_INTERNAL;\n    int check_quota_ret;\n    SeafileCrypt *src_crypt = NULL;\n    SeafileCrypt *dst_crypt = NULL;\n    char *gc_id = NULL;\n\n    src_repo = seaf_repo_manager_get_repo (seaf->repo_mgr, src_repo_id);\n    if (!src_repo) {\n        err_str = COPY_ERR_INTERNAL;\n        ret = -1;\n        seaf_warning (\"Failed to get source repo.\\n\");\n        goto out;\n    }\n\n    if (src_repo->encrypted) {\n        src_crypt = 
get_crypt_by_repo (src_repo, modifier);\n        if (!src_crypt) {\n            err_str = COPY_ERR_INTERNAL;\n            ret = -1;\n            seaf_warning (\"The source repo is encrypted. Please provide password to view it.\\n\");\n            goto out;\n        }\n    }\n\n    dst_repo = seaf_repo_manager_get_repo (seaf->repo_mgr, dst_repo_id);\n    if (!dst_repo) {\n        err_str = COPY_ERR_INTERNAL;\n        ret = -1;\n        seaf_warning (\"Failed to get destination repo.\\n\");\n        goto out;\n    }\n\n    if (dst_repo->encrypted) {\n        dst_crypt = get_crypt_by_repo (dst_repo, modifier);\n        if (!dst_crypt) {\n            err_str = COPY_ERR_INTERNAL;\n            ret = -1;\n            seaf_warning (\"The destination repo is encrypted. Please provide password to view it.\\n\");\n            goto out;\n        }\n    }\n\n    src_names = json_to_file_list (src_filename);\n    dst_names = json_to_file_list (dst_filename);\n    gc_id = seaf_repo_get_current_gc_id (dst_repo);\n\n    file_num = g_list_length (src_names);\n\n    src_dents = g_new0 (SeafDirent *, file_num);\n    dst_dents = g_new0 (SeafDirent *, file_num);\n\n    dirent_hash = get_sub_dirents_hash_map (src_repo, src_path);\n    if (!dirent_hash) {\n        err_str = COPY_ERR_INTERNAL;\n        ret = -1;\n        goto out;\n    }\n\n    gint64 total_files = -1;\n    gint64 total_files_all = 0;\n    /* check filename, size and file count */\n    for (ptr = src_names; ptr; ptr = ptr->next) {\n        name = ptr->data;\n        if (strcmp(name, \"\") == 0) { \n            err_str = COPY_ERR_BAD_ARG;\n            ret = -1;\n            seaf_warning (\"[move files] Bad args: Empty src_filename.\\n\");\n            goto out; \n        }    \n        src_dents[i] = g_hash_table_lookup (dirent_hash, name);\n        if (!src_dents[i]) {\n            err_str = COPY_ERR_INTERNAL;\n            ret = -1;\n            seaf_warning (\"[move files] File %s not Found.\\n\", name);\n            goto 
out; \n        }    \n        if (S_ISDIR(src_dents[i]->mode))\n            total_files = seaf_fs_manager_count_fs_files (seaf->fs_mgr,\n                                                          src_repo->store_id,\n                                                          src_repo->version,\n                                                          src_dents[i]->id);\n        else\n            total_files = 1;\n        if (total_files < 0) {\n            err_str = COPY_ERR_INTERNAL;\n            seaf_warning (\"Failed to get file count.\\n\");\n            ret = -1;\n            goto out;\n        }\n        total_files_all += total_files;\n        if (!check_file_count_and_size (src_repo, src_dents[i], total_files_all,\n                                        &total_size_all, &err_str)) {\n            ret = -1;\n            goto out;\n        }\n        i++;\n    }\n\n    check_quota_ret = seaf_quota_manager_check_quota_with_delta (seaf->quota_mgr, dst_repo_id, total_size_all);\n    if (check_quota_ret != 0) {\n        if (check_quota_ret == -1) {\n           err_str = COPY_ERR_INTERNAL;\n           seaf_warning (\"Failed to check quota.\\n\");\n        } else {\n           err_str = COPY_ERR_QUOTA_IS_FULL;\n        }\n        ret = -1;\n        goto out;\n    }\n\n    if (task)\n        task->total = total_files_all;\n\n    i = 0;\n    /* do copy */\n    for (ptr = dst_names; ptr; ptr = ptr->next) {\n        name = ptr->data;\n        new_id = copy_recursive (src_repo, dst_repo, src_crypt, dst_crypt,\n                                 src_dents[i]->id, src_dents[i]->mode, modifier, task,\n                                 &new_size);\n        if (!new_id) {\n            err_str = COPY_ERR_INTERNAL;\n            ret = -1;\n            seaf_warning (\"[move files] Failed to copy file %s.\\n\", src_dents[i]->name);\n            goto out; \n        }    \n        dst_dents[i] = seaf_dirent_new (dir_version_from_repo_version(dst_repo->version),\n                        
                new_id, src_dents[i]->mode, name,\n                                        src_dents[i]->mtime, modifier, new_size);\n        g_free (new_id);\n        i++;\n    }    \n\n    if (put_dirent_and_commit (dst_repo,\n                               dst_path,\n                               dst_dents,\n                               file_num,\n                               replace,\n                               modifier,\n                               TRUE,\n                               gc_id,\n                               NULL) < 0) {\n        err_str = COPY_ERR_INTERNAL;\n        ret = -1;\n        goto out;\n    }\n\n    seaf_repo_manager_merge_virtual_repo (seaf->repo_mgr, dst_repo_id, NULL);\n\n    if (seaf_repo_manager_del_file (seaf->repo_mgr, src_repo_id, src_path,\n                                    src_filename, modifier, NULL) < 0) {\n        err_str = COPY_ERR_INTERNAL;\n        ret = -1;\n        goto out;\n    }\n\n    if (task)\n        task->successful = TRUE;\n\n    seaf_repo_manager_merge_virtual_repo (seaf->repo_mgr, src_repo_id, NULL);\n\nout:\n    if (src_repo)\n        seaf_repo_unref (src_repo);\n    if (dst_repo)\n        seaf_repo_unref (dst_repo);\n    g_free (src_crypt);\n    g_free (dst_crypt);\n    if (dirent_hash)\n        g_hash_table_unref(dirent_hash);\n    g_free (src_dents);\n    for (i = 0; i < file_num; i++)\n        seaf_dirent_free(dst_dents[i]);\n    g_free (dst_dents);\n    if (src_names)\n        string_list_free (src_names);\n    if (dst_names)\n        string_list_free (dst_names);\n    g_free (gc_id);\n\n    if (ret == 0) {\n        update_repo_size (dst_repo_id);\n    } else {\n        if (task && !task->canceled) {\n            task->failed = TRUE;\n            set_failed_reason (&(task->failed_reason), err_str);\n        }\n    }\n\n    return ret;\n}\n                     \nstatic gboolean\ncheck_move (SeafRepo *src_repo, SeafRepo *dst_repo,\n            const char *src_path, const char 
*dst_path,\n            GList *src_names)\n{\n    char *dst_dirent_path =  NULL;\n    int len;\n    gboolean ret = TRUE;\n\n    if (dst_repo->virtual_info) {\n        dst_dirent_path = g_build_path (\"/\", dst_repo->virtual_info->path, dst_path, NULL);\n    } else {\n        dst_dirent_path = g_strdup (dst_path);\n    }\n\n    GList *ptr;\n    char *src_dirent_path = NULL;\n    char *name;\n    for (ptr = src_names; ptr; ptr = ptr->next) {\n        name = ptr->data;\n        if (src_repo->virtual_info) {\n            src_dirent_path = g_build_path (\"/\", src_repo->virtual_info->path, src_path, name, \"/\", NULL);\n        } else {\n            src_dirent_path = g_build_path (\"/\", src_path, name, \"/\", NULL);\n        }\n        len = strlen(src_dirent_path);\n        if (strncmp (dst_dirent_path, src_dirent_path, len) == 0) {\n            g_free (src_dirent_path);\n            ret = FALSE;\n            goto out;\n        }\n        g_free (src_dirent_path);\n    }\nout:\n    g_free (dst_dirent_path);\n    return ret;\n}\n\nSeafileCopyResult *\nseaf_repo_manager_move_multiple_files (SeafRepoManager *mgr,\n                                       const char *src_repo_id,\n                                       const char *src_path,\n                                       const char *src_filenames,\n                                       const char *dst_repo_id,\n                                       const char *dst_path,\n                                       const char *dst_filenames,\n                                       int replace,\n                                       const char *user,\n                                       int need_progress,\n                                       int synchronous,\n                                       GError **error)\n{\n    SeafRepo *src_repo = NULL, *dst_repo = NULL;\n    SeafDirent **src_dents = NULL, **dst_dents = NULL;\n    char *src_canon_path = NULL, *dst_canon_path = NULL;\n    SeafCommit *dst_head_commit = 
NULL;\n    int i = 0, ret = 0; \n    int file_num = 1; \n    gint64 *file_sizes = NULL;\n    gboolean background = FALSE;\n    char *task_id = NULL;\n    char *name;\n    GList *src_names = NULL, *dst_names = NULL, *ptr;\n    SeafileCopyResult *res = NULL;\n    GHashTable *dirent_hash = NULL;\n\n    GET_REPO_OR_FAIL(src_repo, src_repo_id);\n    \n    if (strcmp(src_repo_id, dst_repo_id) != 0) { \n        GET_REPO_OR_FAIL(dst_repo, dst_repo_id);\n    } else {\n        seaf_repo_ref (src_repo);\n        dst_repo = src_repo;\n    }\n\n    src_canon_path = get_canonical_path (src_path);\n    dst_canon_path = get_canonical_path (dst_path);\n\n    GET_COMMIT_OR_FAIL(dst_head_commit,\n                       dst_repo->id, dst_repo->version,\n                       dst_repo->head->commit_id);\n    /*FAIL_IF_FILE_EXISTS(dst_repo->store_id, dst_repo->version,\n                        dst_head_commit->root_id, dst_canon_path, dst_filename, NULL);*/\n\n    src_names = json_to_file_list (src_filenames);\n    dst_names = json_to_file_list (dst_filenames);\n    if (!src_names || !dst_names) {\n        ret = -1;\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Load filenames to json failed\");\n        goto out;\n    }\n\n    file_num = g_list_length (src_names);\n    int dst_file_num = g_list_length (dst_names);\n    if (dst_file_num != file_num) {\n        ret = -1;\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"The number of files in the parameters does not match\");\n        goto out;\n    }\n\n    gboolean is_virtual_origin = is_virtual_repo_and_origin (src_repo, dst_repo);\n    if (src_repo == dst_repo || is_virtual_origin) {\n        /* get src dirents */\n\n        if (!check_move (src_repo, dst_repo, src_path, dst_path, src_names)) {\n            seaf_warning (\"Can not move copy directory to its subdirectory\");\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                         \"Can not move directory to its 
subdirectory\");\n            ret = -1;\n            goto out;\n        }\n\n        src_dents = g_new0 (SeafDirent *, file_num);\n        file_sizes = g_new0 (gint64, file_num);\n\n        dirent_hash = get_sub_dirents_hash_map (src_repo, src_path);\n        if (!dirent_hash) {\n            ret = -1;\n            goto out;\n        }\n\n        for (ptr = src_names; ptr; ptr = ptr->next) {\n            name = ptr->data;\n            if (strcmp(name, \"\") == 0) {\n                ret = -1;\n                g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Empty src_filenames\");\n                goto out;\n            }\n            src_dents[i] = g_hash_table_lookup(dirent_hash, name);\n            if (!src_dents[i]) {\n                ret = -1;\n                seaf_warning (\"[move files] File %s not Found.\\n\", name);\n                goto out;\n            }\n            file_sizes[i] = (src_dents[i]->version > 0) ? src_dents[i]->size : -1;\n            i++;\n        }\n    \n        dst_dents = g_new0 (SeafDirent *, file_num);\n        i = 0;\n        for (ptr = dst_names; ptr; ptr = ptr->next) {\n            name = ptr->data;\n            if (strcmp(name, \"\") == 0) {\n                ret = -1;\n                g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Empty dst_filenames\");\n                goto out;\n            }\n            /* duplicate src dirents with new names */\n            dst_dents[i] = seaf_dirent_new (dir_version_from_repo_version (dst_repo->version),\n                                            src_dents[i]->id, src_dents[i]->mode, name,\n                                            src_dents[i]->mtime, user, file_sizes[i]);\n            i++;\n        }\n        /* move file within the same repo */\n        if (src_repo == dst_repo) {\n            if (move_file_same_repo (src_repo_id,\n                                     src_filenames,\n                                     src_canon_path, src_dents,\n                  
                   dst_canon_path, dst_dents,\n                                     file_num, replace, user, error) < 0) {\n                ret = -1;\n                goto out;\n            }\n        } else {\n            /* move between virtual and origin repo */\n            if (put_dirent_and_commit (dst_repo,\n                                       dst_path,\n                                       dst_dents,\n                                       file_num,\n                                       replace,\n                                       user,\n                                       FALSE,\n                                       NULL,\n                                       NULL) < 0) {\n                ret = -1;\n                goto out;\n            }\n            seaf_repo_manager_merge_virtual_repo (mgr, dst_repo->id, NULL);\n\n            if (seaf_repo_manager_del_file (mgr, src_repo->id, src_path,\n                                            src_filenames, user, error) < 0) {\n                ret = -1;\n                goto out;\n            }\n        }\n        seaf_repo_manager_merge_virtual_repo (mgr, src_repo_id, NULL);\n\n        update_repo_size (dst_repo_id);\n    } else {\n        /* move between different repos */\n        if (!synchronous) {\n            background = TRUE;\n\n            task_id = seaf_copy_manager_add_task (seaf->copy_mgr,\n                                                  src_repo_id,\n                                                  src_canon_path,\n                                                  src_filenames,\n                                                  dst_repo_id,\n                                                  dst_canon_path,\n                                                  dst_filenames,\n                                                  0,\n                                                  user,\n                                                  cross_repo_move,\n                                 
                 need_progress);\n            if (need_progress && !task_id) {\n                seaf_warning (\"Failed to start copy task.\\n\");\n                g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                        \"failed to start copy task\");\n                ret = -1;\n                goto out; \n            }\n        } else {\n            /* Synchronous for cross-repo move */\n            if (cross_repo_move (src_repo_id,\n                                 src_canon_path,\n                                 src_filenames,\n                                 dst_repo_id,\n                                 dst_canon_path,\n                                 dst_filenames,\n                                 replace,\n                                 user,\n                                 NULL) < 0) { \n                g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                             \"Failed to move\");\n                ret = -1;\n                goto out; \n            }\n        } // Synchronous move\n    } //else diffrent repo\n\nout:\n    if (src_repo) seaf_repo_unref (src_repo);\n    if (dst_repo) seaf_repo_unref (dst_repo);\n\n    if (dst_head_commit) seaf_commit_unref(dst_head_commit);\n    \n    if (src_canon_path) g_free (src_canon_path);\n    if (dst_canon_path) g_free (dst_canon_path);\n\n    if (src_names)\n        string_list_free (src_names);\n    if (dst_names)\n        string_list_free (dst_names);\n    if (file_sizes)\n        g_free (file_sizes);\n\n    if (dirent_hash)\n        g_hash_table_unref(dirent_hash);\n    if (src_dents)\n        g_free (src_dents);\n    if (dst_dents) {\n        for (i = 0; i < file_num; i++)\n            seaf_dirent_free (dst_dents[i]);\n        g_free (dst_dents);\n    }\n    \n    if (ret == 0) { \n        res = seafile_copy_result_new ();\n        g_object_set (res, \"background\", background, \"task_id\", task_id, NULL);\n        g_free (task_id);\n    }    \n\n    return 
/*
 * Create directory @new_dir_path (a relative path, possibly several levels
 * deep) under @parent_dir, creating any missing intermediate directories,
 * then commit the change and merge it into related virtual repos.
 *
 * Algorithm: walk the target path from the deepest component upwards until
 * an existing ancestor is found, collecting the missing components; then
 * create them one by one on top of that ancestor and commit once at the end.
 *
 * Returns 0 on success (including when the full path already exists),
 * -1 on error with @error set.
 */
int
seaf_repo_manager_mkdir_with_parents (SeafRepoManager *mgr,
                                      const char *repo_id,
                                      const char *parent_dir,
                                      const char *new_dir_path,
                                      const char *user,
                                      GError **error)
{
    SeafRepo *repo = NULL;
    SeafCommit *head_commit = NULL;
    char **sub_folders = NULL;
    int nfolder;
    char buf[SEAF_PATH_MAX];
    char *root_id = NULL;
    SeafDirent *new_dent = NULL;
    char *parent_dir_can = NULL;
    char *relative_dir_can = NULL;
    char *abs_path = NULL;
    int total_path_len;
    int sub_folder_len;
    GList *uncre_dir_list = NULL;   /* missing components, shallowest first */
    GList *iter_list = NULL;
    char *uncre_dir;
    int ret = 0;

    /* @new_dir_path must be relative to @parent_dir. */
    if (new_dir_path[0] == '/' || new_dir_path[0] == '\\') {
        seaf_warning ("[mkdir with parent] Invalid relative path %s.\n", new_dir_path);
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                     "Invalid relative path");
        return -1;
    }

    GET_REPO_OR_FAIL(repo, repo_id);
    GET_COMMIT_OR_FAIL(head_commit, repo->id, repo->version, repo->head->commit_id);

    relative_dir_can = get_canonical_path (new_dir_path);
    sub_folders = g_strsplit (relative_dir_can, "/", 0);
    nfolder = g_strv_length (sub_folders);

    /* Validate every path component before touching the fs tree. */
    int i = 0;
    for (; i < nfolder; ++i) {
        if (strcmp (sub_folders[i], "") == 0)
            continue;

        if (should_ignore_file (sub_folders[i], NULL)) {
            seaf_warning ("[post dir] Invalid dir name %s.\n", sub_folders[i]);
            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,
                         "Invalid dir name");
            ret = -1;
            goto out;
        }
    }

    /* Build the absolute target path; the "/" parent needs no extra
     * separator between parent and relative part. */
    if (strcmp (parent_dir, "/") == 0 ||
        strcmp (parent_dir, "\\") == 0) {
        parent_dir_can = g_strdup ("/");
        abs_path = g_strdup_printf ("%s%s", parent_dir_can, relative_dir_can);
    } else {
        parent_dir_can = get_canonical_path (parent_dir);
        abs_path = g_strdup_printf ("%s/%s", parent_dir_can, relative_dir_can);
    }
    if (!abs_path) {
        seaf_warning ("[mkdir with parent] Out of memory.\n");
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_INTERNAL,
                     "Out of memory");
        ret = -1;
        goto out;
    }
    total_path_len = strlen (abs_path);

    // from the last, to check the folder exist
    /* Truncate abs_path in place, one component at a time (component plus
     * its '/' separator), until an existing ancestor directory is found.
     * Missing components are prepended to uncre_dir_list, so the list ends
     * up ordered from shallowest to deepest. */
    i = nfolder - 1;
    for (; i >= 0; --i) {
        if (strcmp (sub_folders[i], "") == 0)
            continue;

        sub_folder_len = strlen (sub_folders[i]) + 1;
        total_path_len -= sub_folder_len;
        memset (abs_path + total_path_len, '\0', sub_folder_len);

        if (check_file_exists (repo->store_id, repo->version,
                               head_commit->root_id, abs_path, sub_folders[i], NULL)) {
            // folder exist, skip loop to create unexist subfolder
            /* Re-append the existing component: abs_path is now the deepest
             * existing directory, the base for creation below. */
            strcat (abs_path, "/");
            strcat (abs_path, sub_folders[i]);
            break;
        } else {
            // folder not exist, cache it to create later
            /* List nodes point into sub_folders; freed via g_strfreev. */
            uncre_dir_list = g_list_prepend (uncre_dir_list, sub_folders[i]);
        }
    }

    if (uncre_dir_list) {
        // exist parent folder has been found, based on it to create unexist subfolder
        /* Track the evolving fs root as each intermediate dir is added. */
        char new_root_id[41];
        memcpy (new_root_id, head_commit->root_id, 40);
        new_root_id[40] = '\0';

        for (iter_list = uncre_dir_list; iter_list; iter_list = iter_list->next) {
            uncre_dir = iter_list->data;
            /* EMPTY_SHA1 + S_IFDIR == empty directory dirent. */
            new_dent = seaf_dirent_new (dir_version_from_repo_version(repo->version),
                                        EMPTY_SHA1, S_IFDIR, uncre_dir,
                                        (gint64)time(NULL), NULL, -1);

            root_id = do_post_file (repo,
                                    new_root_id, abs_path, new_dent);
            if (!root_id) {
                seaf_warning ("[put dir] Failed to put dir.\n");
                g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                             "Failed to put dir");
                ret = -1;
                seaf_dirent_free (new_dent);
                goto out;
            }

            // the last folder has been created
            /* Keep root_id alive: it becomes the commit root below. */
            if (!iter_list->next) {
                seaf_dirent_free (new_dent);
                break;
            }

            /* Descend: extend the base path and adopt the new root. */
            strcat (abs_path, "/");
            strcat (abs_path, uncre_dir);
            memcpy (new_root_id, root_id, 40);

            seaf_dirent_free (new_dent);
            g_free (root_id);
        }

        /* Commit. */
        snprintf(buf, SEAF_PATH_MAX, "Added directory \"%s\"", relative_dir_can);
        if (gen_new_commit (repo_id, head_commit, root_id,
                            user, buf, NULL, TRUE, FALSE, NULL, error) < 0) {
            ret = -1;
            g_free (root_id);
            goto out;
        }

        seaf_repo_manager_merge_virtual_repo (mgr, repo_id, NULL);
        g_free (root_id);
    }

out:
    if (repo)
        seaf_repo_unref (repo);
    if (head_commit)
        seaf_commit_unref(head_commit);
    if (sub_folders)
        g_strfreev (sub_folders);
    if (uncre_dir_list)
        g_list_free (uncre_dir_list);
    if (relative_dir_can)
        g_free (relative_dir_can);
    if (parent_dir_can)
        g_free (parent_dir_can);
    if (abs_path)
        g_free (abs_path);

    return ret;
}
GError **error)\n{\n    SeafRepo *repo = NULL;\n    SeafCommit *head_commit = NULL;\n    char *canon_path = NULL;\n    char buf[SEAF_PATH_MAX];\n    char *root_id = NULL;\n    SeafDirent *new_dent = NULL;\n    int ret = 0;\n\n    GET_REPO_OR_FAIL(repo, repo_id);\n    GET_COMMIT_OR_FAIL(head_commit, repo->id, repo->version, repo->head->commit_id);\n\n    canon_path = get_canonical_path (parent_dir);\n\n    if (should_ignore_file (new_dir_name, NULL)) {\n        seaf_warning (\"[post dir] Invalid dir name %s.\\n\", new_dir_name);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid dir name\");\n        ret = -1;\n        goto out;\n    }\n\n    FAIL_IF_FILE_EXISTS(repo->store_id, repo->version,\n                        head_commit->root_id, canon_path, new_dir_name, NULL);\n\n    if (!new_dent) {\n        new_dent = seaf_dirent_new (dir_version_from_repo_version(repo->version),\n                                    EMPTY_SHA1, S_IFDIR, new_dir_name,\n                                    (gint64)time(NULL), NULL, -1);\n    }\n\n    root_id = do_post_file (repo,\n                            head_commit->root_id, canon_path, new_dent);\n    if (!root_id) {\n        seaf_warning (\"[put dir] Failed to put dir %s to %s in repo %s.\\n\",\n                      new_dir_name, canon_path, repo->id);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to put dir\");\n        ret = -1;\n        goto out;\n    }\n\n    /* Commit. 
*/\n    snprintf(buf, SEAF_PATH_MAX, \"Added directory \\\"%s\\\"\", new_dir_name);\n    if (gen_new_commit (repo_id, head_commit, root_id,\n                        user, buf, NULL, TRUE, FALSE, NULL, error) < 0) {\n        ret = -1;\n        goto out;\n    }\n\n    seaf_repo_manager_merge_virtual_repo (mgr, repo_id, NULL);\n\nout:\n    if (repo)\n        seaf_repo_unref (repo);\n    if (head_commit)\n        seaf_commit_unref(head_commit);\n    seaf_dirent_free (new_dent);\n    g_free (root_id);\n    g_free (canon_path);\n\n    return ret;\n}\n\nint\nseaf_repo_manager_post_empty_file (SeafRepoManager *mgr,\n                                   const char *repo_id,\n                                   const char *parent_dir,\n                                   const char *new_file_name,\n                                   const char *user,\n                                   GError **error)\n{\n    SeafRepo *repo = NULL;\n    SeafCommit *head_commit = NULL;\n    char *canon_path = NULL;\n    char buf[SEAF_PATH_MAX];\n    char *root_id = NULL;\n    SeafDirent *new_dent = NULL;\n    int ret = 0;\n\n    GET_REPO_OR_FAIL(repo, repo_id);\n    GET_COMMIT_OR_FAIL(head_commit, repo->id, repo->version, repo->head->commit_id);\n\n    if (!canon_path)\n        /* no need to call get_canonical_path again when retry */\n        canon_path = get_canonical_path (parent_dir);\n\n    if (should_ignore_file (new_file_name, NULL)) {\n        seaf_warning (\"[post file] Invalid file name %s.\\n\", new_file_name);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid file name\");\n        ret = -1;\n        goto out;\n    }\n\n    FAIL_IF_FILE_EXISTS(repo->store_id, repo->version,\n                        head_commit->root_id, canon_path, new_file_name, NULL);\n\n    if (!new_dent) {\n        new_dent = seaf_dirent_new (dir_version_from_repo_version(repo->version),\n                                    EMPTY_SHA1, STD_FILE_MODE, new_file_name,\n   
                                 (gint64)time(NULL), user, 0);\n    }\n\n    root_id = do_post_file (repo,\n                            head_commit->root_id, canon_path, new_dent);\n    if (!root_id) {\n        seaf_warning (\"[put dir] Failed to create empty file dir.\\n\");\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to put dir\");\n        ret = -1;\n        goto out;\n    }\n\n    /* Commit. */\n    snprintf(buf, SEAF_PATH_MAX, \"Added \\\"%s\\\"\", new_file_name);\n    if (gen_new_commit (repo_id, head_commit, root_id,\n                        user, buf, NULL, TRUE, FALSE, NULL, error) < 0) {\n        ret = -1;\n        goto out;\n    }\n\n    seaf_repo_manager_merge_virtual_repo (mgr, repo_id, NULL);\n\n    update_repo_size (repo_id);\n\nout:\n    if (repo)\n        seaf_repo_unref (repo);\n    if (head_commit)\n        seaf_commit_unref(head_commit);\n    seaf_dirent_free (new_dent);\n    g_free (root_id);\n    g_free (canon_path);\n\n    return ret;\n}\n\nstatic char *\nrename_file_recursive(SeafRepo *repo,\n                      const char *dir_id,\n                      const char *to_path,\n                      const char *oldname,\n                      const char *newname)\n{\n    SeafDir *olddir, *newdir;\n    SeafDirent *dent;\n    GList *ptr;\n    char *to_path_dup = NULL;\n    char *remain = NULL;\n    char *slash;\n    char *id = NULL;\n    char *ret = NULL;\n\n    olddir = seaf_fs_manager_get_seafdir_sorted(seaf->fs_mgr,\n                                                repo->store_id, repo->version,\n                                                dir_id);\n    if (!olddir)\n        return NULL;\n\n    /* we reach the target dir. 
*/\n    if (*to_path == '\\0') {\n        SeafDirent *old, *newdent = NULL;\n        GList *newentries = NULL, *p;\n\n        /* When renameing, there is a pitfall: we can't simply rename the\n         * dirent, since the dirents are required to be sorted in descending\n         * order. We need to copy all old dirents except the target dirent,\n         * and then rename the target dirent, and then insert the new\n         * dirent, so that we can maintain the descending order of dirents. */\n        for (p = olddir->entries; p != NULL; p = p->next) {\n            old = p->data;\n            if (strcmp(old->name, oldname) != 0) {\n                newentries = g_list_prepend (newentries, seaf_dirent_dup(old));\n            } else {\n                newdent = seaf_dirent_new (old->version, old->id, old->mode,\n                                           newname, old->mtime,\n                                           old->modifier, old->size);\n            }\n        }\n\n        newentries = g_list_reverse (newentries);\n\n        if (newdent) {\n            newentries = g_list_insert_sorted(newentries, newdent, compare_dirents);\n        }\n\n        newdir = seaf_dir_new (NULL, newentries,\n                               dir_version_from_repo_version(repo->version));\n        if (seaf_dir_save (seaf->fs_mgr, repo->store_id, repo->version, newdir) == 0)\n            ret = g_strndup (newdir->dir_id, 40);\n        seaf_dir_free (newdir);\n\n        goto out;\n    }\n\n    to_path_dup = g_strdup (to_path);\n    slash = strchr (to_path_dup, '/');\n\n    if (!slash) {\n        remain = to_path_dup + strlen(to_path_dup);\n    } else {\n        *slash = '\\0';\n        remain = slash + 1;\n    }\n\n    for (ptr = olddir->entries; ptr; ptr = ptr->next) {\n        dent = (SeafDirent *)ptr->data;\n\n        if (strcmp(dent->name, to_path_dup) != 0)\n            continue;\n\n        id = rename_file_recursive (repo, dent->id, remain, oldname, newname);\n        if (id != 
NULL) {\n            memcpy(dent->id, id, 40);\n            dent->id[40] = '\\0';\n        }\n        break;\n    }\n    \n    if (id != NULL) {\n        /* Create a new SeafDir. */\n        GList *new_entries;\n        \n        new_entries = dup_seafdir_entries (olddir->entries);\n        newdir = seaf_dir_new (NULL, new_entries,\n                               dir_version_from_repo_version(repo->version));\n        if (seaf_dir_save (seaf->fs_mgr, repo->store_id, repo->version, newdir) == 0)\n            ret = g_strdup(newdir->dir_id);\n        seaf_dir_free (newdir);\n    }\n\nout:\n    g_free (to_path_dup);\n    g_free (id);\n    seaf_dir_free(olddir);\n    return ret;\n}\n\nstatic char *\ndo_rename_file(SeafRepo *repo,\n               const char *root_id,\n               const char *parent_dir,\n               const char *oldname,\n               const char *newname)\n{\n    /* if parent_dir is a absolutely path, we will remove the first '/' */\n    if (*parent_dir == '/')\n        parent_dir = parent_dir + 1;\n\n    return rename_file_recursive(repo, root_id, parent_dir, oldname, newname);\n}\n\n\nint\nseaf_repo_manager_rename_file (SeafRepoManager *mgr,\n                               const char *repo_id,\n                               const char *parent_dir,\n                               const char *oldname,\n                               const char *newname,\n                               const char *user,\n                               GError **error)\n{\n    SeafRepo *repo = NULL;\n    SeafCommit *head_commit = NULL;\n    char *root_id = NULL;\n    char *canon_path = NULL;\n    char buf[SEAF_PATH_MAX];\n    int mode = 0;\n    int ret = 0;\n\n    if (strcmp(oldname, newname) == 0)\n        return 0;\n    \n    GET_REPO_OR_FAIL(repo, repo_id);\n    GET_COMMIT_OR_FAIL(head_commit, repo->id, repo->version, repo->head->commit_id);\n    \n    if (!canon_path)\n        canon_path = get_canonical_path (parent_dir);\n\n    if (should_ignore_file (newname, 
NULL)) {\n        seaf_warning (\"[rename file] Invalid filename %s.\\n\", newname);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid filename\");\n        ret = -1;\n        goto out;\n    }\n\n    FAIL_IF_FILE_NOT_EXISTS(repo->store_id, repo->version,\n                            head_commit->root_id, canon_path, oldname, &mode);\n    FAIL_IF_FILE_EXISTS(repo->store_id, repo->version,\n                        head_commit->root_id, canon_path, newname, NULL);\n\n    root_id = do_rename_file (repo, head_commit->root_id, canon_path,\n                              oldname, newname);\n    if (!root_id) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"faile to rename file %s\", oldname);\n        ret = -1;\n        goto out;\n    }\n\n    /* Commit. */\n    if (S_ISDIR(mode)) {\n        snprintf(buf, SEAF_PATH_MAX, \"Renamed directory \\\"%s\\\"\", oldname);\n    } else {\n        snprintf(buf, SEAF_PATH_MAX, \"Renamed \\\"%s\\\"\", oldname);\n    }\n\n    if (gen_new_commit (repo_id, head_commit, root_id,\n                        user, buf, NULL, TRUE, FALSE, NULL, error) < 0) {\n        ret = -1;\n        goto out;\n    }\n\n    seaf_repo_manager_merge_virtual_repo (mgr, repo_id, NULL);\n\nout:\n    if (repo)\n        seaf_repo_unref (repo);\n    if (head_commit)\n        seaf_commit_unref (head_commit);\n    g_free (canon_path);\n    g_free (root_id);\n\n    return ret;\n}\n\nstatic char *\nput_file_recursive(SeafRepo *repo,\n                   const char *dir_id,\n                   const char *to_path,\n                   SeafDirent *newdent)\n{\n    SeafDir *olddir, *newdir;\n    SeafDirent *dent;\n    GList *ptr;\n    char *to_path_dup = NULL;\n    char *remain = NULL;\n    char *slash;\n    char *id = NULL;\n    char *ret = NULL;\n\n    olddir = seaf_fs_manager_get_seafdir_sorted(seaf->fs_mgr,\n                                                repo->store_id, repo->version,\n  
                                              dir_id);\n    if (!olddir)\n        return NULL;\n\n    /* we reach the target dir. Update the target dirent. */\n    if (*to_path == '\\0') {\n        GList *newentries = NULL, *p;\n        SeafDirent *dent;\n\n        for (p = olddir->entries; p; p = p->next) {\n            dent = p->data;\n            if (strcmp(dent->name, newdent->name) == 0) {\n                newentries = g_list_prepend (newentries, seaf_dirent_dup(newdent));\n            } else {\n                newentries = g_list_prepend (newentries, seaf_dirent_dup(dent));\n            }\n        }\n\n        newentries = g_list_reverse (newentries);\n        newdir = seaf_dir_new (NULL, newentries,\n                               dir_version_from_repo_version(repo->version));\n        if (seaf_dir_save (seaf->fs_mgr, repo->store_id, repo->version, newdir) == 0)\n            ret = g_strdup (newdir->dir_id);\n        seaf_dir_free (newdir);\n\n        goto out;\n    }\n\n    to_path_dup = g_strdup (to_path);\n    slash = strchr (to_path_dup, '/');\n\n    if (!slash) {\n        remain = to_path_dup + strlen(to_path_dup);\n    } else {\n        *slash = '\\0';\n        remain = slash + 1;\n    }\n\n    for (ptr = olddir->entries; ptr; ptr = ptr->next) {\n        dent = (SeafDirent *)ptr->data;\n\n        if (strcmp(dent->name, to_path_dup) != 0)\n            continue;\n\n        id = put_file_recursive (repo, dent->id, remain, newdent);\n        if (id != NULL) {\n            memcpy(dent->id, id, 40);\n            dent->id[40] = '\\0';\n            if (repo->version > 0)\n                dent->mtime = (guint64)time(NULL);\n        }\n        break;\n    }\n    \n    if (id != NULL) {\n        /* Create a new SeafDir. 
*/\n        GList *new_entries;\n        \n        new_entries = dup_seafdir_entries (olddir->entries);\n        newdir = seaf_dir_new (NULL, new_entries,\n                               dir_version_from_repo_version(repo->version));\n        if (seaf_dir_save (seaf->fs_mgr, repo->store_id, repo->version, newdir) == 0)\n            ret = g_strdup(newdir->dir_id);\n        seaf_dir_free (newdir);\n    }\n\nout:\n    g_free (to_path_dup);\n    g_free (id);\n    seaf_dir_free(olddir);\n    return ret;\n}\n\nstatic char *\ndo_put_file (SeafRepo *repo,\n             const char *root_id,\n             const char *parent_dir,\n             SeafDirent *dent)\n{\n    /* if parent_dir is a absolutely path, we will remove the first '/' */\n    if (*parent_dir == '/')\n        parent_dir = parent_dir + 1;\n\n    return put_file_recursive(repo, root_id, parent_dir, dent);\n}\n\nint\nseaf_repo_manager_put_file (SeafRepoManager *mgr,\n                            const char *repo_id,\n                            const char *temp_file_path,\n                            const char *parent_dir,\n                            const char *file_name,\n                            const char *user,\n                            const char *head_id,\n                            gint64 mtime,\n                            char **new_file_id,\n                            GError **error)\n{\n    SeafRepo *repo = NULL;\n    SeafCommit *head_commit = NULL;\n    char *canon_path = NULL;\n    unsigned char sha1[20];\n    char buf[SEAF_PATH_MAX];\n    char *root_id = NULL;\n    SeafileCrypt *crypt = NULL;\n    SeafDirent *new_dent = NULL;\n    char hex[41];\n    char *old_file_id = NULL, *fullpath = NULL;\n    char *gc_id = NULL;\n    int ret = 0;\n\n    if (g_access (temp_file_path, R_OK) != 0) {\n        seaf_warning (\"[put file] File %s doesn't exist or not readable.\\n\",\n                      temp_file_path);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     
\"Invalid input file\");\n        return -1;\n    }\n\n    GET_REPO_OR_FAIL(repo, repo_id);\n    const char *base = head_id ? head_id : repo->head->commit_id;\n    GET_COMMIT_OR_FAIL(head_commit, repo->id, repo->version, base);\n\n    if (!canon_path)\n        canon_path = get_canonical_path (parent_dir);\n\n    if (should_ignore_file (file_name, NULL)) {\n        seaf_warning (\"[put file] Invalid filename %s.\\n\", file_name);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid filename\");\n        ret = -1;\n        goto out;\n    }\n\n    if (strstr (parent_dir, \"//\") != NULL) {\n        seaf_warning (\"[put file] parent_dir cantains // sequence.\\n\");\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                     \"Invalid parent dir\");\n        ret = -1;\n        goto out;\n    }\n    \n    FAIL_IF_FILE_NOT_EXISTS(repo->store_id, repo->version,\n                            head_commit->root_id, canon_path, file_name, NULL);\n\n    /* Write blocks. 
*/\n    if (repo->encrypted) {\n        unsigned char key[32], iv[16];\n        if (seaf_passwd_manager_get_decrypt_key_raw (seaf->passwd_mgr,\n                                                     repo_id, user,\n                                                     key, iv) < 0) {\n            seaf_warning (\"Passwd for repo %s is not set.\\n\", repo_id);\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                         \"Passwd is not set\");\n            ret = -1;\n            goto out;\n        }\n        crypt = seafile_crypt_new (repo->enc_version, key, iv);\n    }\n\n    gc_id = seaf_repo_get_current_gc_id (repo);\n\n    gint64 size;\n    if (seaf_fs_manager_index_blocks (seaf->fs_mgr,\n                                      repo->store_id, repo->version,\n                                      temp_file_path,\n                                      sha1, &size, crypt, TRUE, FALSE, NULL) < 0) {\n        seaf_warning (\"failed to index blocks\");\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to index blocks\");\n        ret = -1;\n        goto out;\n    }\n        \n    rawdata_to_hex(sha1, hex, 20);\n    if (mtime <= 0) {\n        mtime = (gint64)time(NULL);\n    }\n    new_dent = seaf_dirent_new (dir_version_from_repo_version(repo->version),\n                                hex, STD_FILE_MODE, file_name,\n                                mtime, user, size);\n\n    if (!fullpath)\n        fullpath = g_build_filename(parent_dir, file_name, NULL);\n\n    old_file_id = seaf_fs_manager_path_to_obj_id (seaf->fs_mgr,\n                                                  repo->store_id, repo->version,\n                                                  head_commit->root_id,\n                                                  fullpath, NULL, NULL);\n\n    if (g_strcmp0(old_file_id, new_dent->id) == 0) {\n        if (new_file_id)\n            *new_file_id = g_strdup(new_dent->id);\n        goto out;\n   
 }\n\n    root_id = do_put_file (repo, head_commit->root_id, canon_path, new_dent);\n    if (!root_id) {\n        seaf_warning (\"[put file] Failed to put file %s to %s in repo %s.\\n\",\n                      file_name, canon_path, repo->id);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to put file\");\n        ret = -1;\n        goto out;\n    }\n\n    /* Commit. */\n    snprintf(buf, SEAF_PATH_MAX, \"Modified \\\"%s\\\"\", file_name);\n    if (gen_new_commit (repo_id, head_commit, root_id, user, buf, NULL, TRUE, TRUE, gc_id, error) < 0) {\n        ret = -1;\n        goto out;       \n    }\n\n    if (new_file_id)\n        *new_file_id = g_strdup(new_dent->id);\n\n    seaf_repo_manager_merge_virtual_repo (mgr, repo_id, NULL);\n\nout:\n    if (repo)\n        seaf_repo_unref (repo);\n    if (head_commit)\n        seaf_commit_unref(head_commit);\n    seaf_dirent_free (new_dent);\n    g_free (root_id);\n    g_free (canon_path);\n    g_free (crypt);\n    g_free (old_file_id);\n    g_free (fullpath);\n    g_free (gc_id);\n\n    if (ret == 0) {\n        update_repo_size (repo_id);\n    }\n\n    return ret;\n}\n\nstatic char *\ngen_commit_description (SeafRepo *repo,\n                        const char *root,\n                        const char *parent_root)\n{\n    GList *p;\n    GList *results = NULL;\n    char *desc;\n    \n    diff_commit_roots (repo->store_id, repo->version,\n                       parent_root, root, &results, TRUE);\n\n    desc = diff_results_to_description (results);\n\n    for (p = results; p; p = p->next) {\n        DiffEntry *de = p->data;\n        diff_entry_free (de);\n    }\n    g_list_free (results);\n\n    return desc;\n}\n\nint\nseaf_repo_manager_update_dir (SeafRepoManager *mgr,\n                              const char *repo_id,\n                              const char *dir_path,\n                              const char *new_dir_id,\n                              const char *user,\n     
                              const char *head_id,
                              char *new_commit_id,
                              GError **error)
{
    SeafRepo *repo = NULL;
    SeafCommit *head_commit = NULL;
    char *canon_path = NULL;
    char *parent = NULL, *dirname = NULL;
    SeafDirent *new_dent = NULL;
    char *root_id = NULL;
    char *commit_desc = NULL;
    int ret = 0;

    GET_REPO_OR_FAIL(repo, repo_id);
    const char *base = head_id ? head_id : repo->head->commit_id;
    GET_COMMIT_OR_FAIL(head_commit, repo->id, repo->version, base);

    /* Are we updating the root? */
    if (strcmp (dir_path, "/") == 0) {
        commit_desc = gen_commit_description (repo, new_dir_id, head_commit->root_id);
        if (!commit_desc)
            commit_desc = g_strdup("Auto merge by system");

        /* new_dir_id becomes the new root directly; new_commit_id is an
         * out buffer filled by gen_new_commit (NOTE(review): inferred
         * from the call shape — confirm). */
        if (gen_new_commit (repo_id, head_commit, new_dir_id,
                            user, commit_desc, new_commit_id, TRUE, FALSE, NULL, error) < 0)
            ret = -1;
        g_free (commit_desc);
        goto out;
    }

    parent = g_path_get_dirname (dir_path);
    canon_path = get_canonical_path (parent);
    g_free (parent);

    dirname = g_path_get_basename (dir_path);

    FAIL_IF_FILE_NOT_EXISTS(repo->store_id, repo->version,
                            head_commit->root_id, canon_path, dirname, NULL);

    /* Build a dirent pointing at the new dir object and splice it into
     * the parent via do_put_file (dirs reuse the put-file path here). */
    new_dent = seaf_dirent_new (dir_version_from_repo_version(repo->version),
                                new_dir_id, S_IFDIR, dirname,
                                (gint64)time(NULL), NULL, -1);

    root_id = do_put_file (repo, head_commit->root_id, canon_path, new_dent);
    if (!root_id) {
        seaf_warning ("[update dir] Failed to put file.\n");
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                     "Failed to update dir");
        ret = -1;
        goto out;
    }

    commit_desc = gen_commit_description (repo, root_id, head_commit->root_id);
    if (!commit_desc)
        commit_desc = g_strdup("Auto merge by system");

    if (gen_new_commit (repo_id, head_commit, root_id,
                        user, commit_desc, new_commit_id, TRUE, FALSE, NULL, error) < 0) {
        ret = -1;
        g_free (commit_desc);
        goto out;
    }
    g_free (commit_desc);

out:
    seaf_repo_unref (repo);
    seaf_commit_unref (head_commit);
    seaf_dirent_free (new_dent);
    g_free (canon_path);
    g_free (dirname);
    g_free (root_id);

    if (ret == 0)
        update_repo_size (repo_id);

    return ret;
}

/* The function below is dead code, kept commented out for reference. */
/* int */
/* seaf_repo_manager_put_file_blocks (SeafRepoManager *mgr, */
/*                                    const char *repo_id, */
/*                                    const char *parent_dir, */
/*                                    const char *file_name, */
/*                                    const char *blockids_json, */
/*                                    const char *paths_json, */
/*                                    const char *user, */
/*                                    const char *head_id, */
/*                                    gint64 file_size, */
/*                                    char **new_file_id, */
/*                                    GError **error) */
/* { */
/*     SeafRepo *repo = NULL; */
/*     SeafCommit *head_commit = NULL; */
/*     char *canon_path = NULL; */
/*     unsigned char sha1[20]; */
/*     char buf[SEAF_PATH_MAX]; */
/*     char *root_id = NULL; */
/*     SeafDirent *new_dent = NULL; */
/*     char hex[41]; */
/*     GList *blockids = NULL, *paths = NULL, *ptr; */
/*     char *old_file_id = NULL, *fullpath = NULL; */
/*     int ret = 0; */

/*     blockids = json_to_file_list (blockids_json); */
/*     paths = json_to_file_list (paths_json); */
/*     if (g_list_length(blockids) != g_list_length(paths)) { */
/*         seaf_warning ("[put-blks] Invalid blockids or paths.\n"); */
/*         g_set_error 
(error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Invalid files\"); */\n/*         ret = -1; */\n/*         goto out; */\n/*     } */\n\n\n/*     for (ptr = paths; ptr; ptr = ptr->next) { */\n/*         char *temp_file_path = ptr->data; */\n/*         if (g_access (temp_file_path, R_OK) != 0) { */\n/*             seaf_warning (\"[put-blks] File block %s doesn't exist or not readable.\\n\", */\n/*                           temp_file_path); */\n/*             g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, */\n/*                          \"Invalid input file\"); */\n/*             ret = -1; */\n/*             goto out; */\n/*         } */\n/*     } */\n\n/*     GET_REPO_OR_FAIL(repo, repo_id); */\n/*     const char *base = head_id ? head_id : repo->head->commit_id; */\n/*     GET_COMMIT_OR_FAIL(head_commit, repo->id, repo->version, base); */\n\n/*     if (!canon_path) */\n/*         canon_path = get_canonical_path (parent_dir); */\n\n/*     if (should_ignore_file (file_name, NULL)) { */\n/*         seaf_warning (\"[put-blks] Invalid filename %s.\\n\", file_name); */\n/*         g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, */\n/*                      \"Invalid filename\"); */\n/*         ret = -1; */\n/*         goto out; */\n/*     } */\n\n/*     if (strstr (parent_dir, \"//\") != NULL) { */\n/*         seaf_warning (\"[put-blks] parent_dir cantains // sequence.\\n\"); */\n/*         g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, */\n/*                      \"Invalid parent dir\"); */\n/*         ret = -1; */\n/*         goto out; */\n/*     } */\n\n/*     FAIL_IF_FILE_NOT_EXISTS(repo->store_id, repo->version, */\n/*                             head_commit->root_id, canon_path, file_name, NULL); */\n\n/*     /\\* Write blocks. 
*\\/ */\n/*     if (seaf_fs_manager_index_file_blocks (seaf->fs_mgr, */\n/*                                            repo->store_id, repo->version, */\n/*                                            paths, */\n/*                                            blockids, sha1, file_size) < 0) { */\n/*         seaf_warning (\"failed to index blocks\"); */\n/*         g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, */\n/*                      \"Failed to index blocks\"); */\n/*         ret = -1; */\n/*         goto out; */\n/*     } */\n\n/*     rawdata_to_hex(sha1, hex, 20); */\n/*     new_dent = seaf_dirent_new (dir_version_from_repo_version(repo->version), */\n/*                                 hex, STD_FILE_MODE, file_name, */\n/*                                 (gint64)time(NULL), user, file_size); */\n\n/*     if (!fullpath) */\n/*         fullpath = g_build_filename(parent_dir, file_name, NULL); */\n\n/*     old_file_id = seaf_fs_manager_path_to_obj_id (seaf->fs_mgr, */\n/*                                                   repo->store_id, repo->version, */\n/*                                                   head_commit->root_id, */\n/*                                                   fullpath, NULL, NULL); */\n\n/*     if (g_strcmp0(old_file_id, new_dent->id) == 0) { */\n/*         if (new_file_id) */\n/*             *new_file_id = g_strdup(new_dent->id); */\n/*         goto out; */\n/*     } */\n\n/*     root_id = do_put_file (repo, head_commit->root_id, canon_path, new_dent); */\n/*     if (!root_id) { */\n/*         seaf_warning (\"[put-blks] Failed to put file.\\n\"); */\n/*         g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, */\n/*                      \"Failed to put file\"); */\n/*         ret = -1; */\n/*         goto out; */\n/*     } */\n\n/*     /\\* Commit. 
*\\/ */\n/*     snprintf(buf, SEAF_PATH_MAX, \"Modified \\\"%s\\\"\", file_name); */\n/*     if (gen_new_commit (repo_id, head_commit, root_id, user, buf, NULL, error) < 0) { */\n/*         ret = -1; */\n/*         goto out; */\n/*     } */\n\n/*     if (new_file_id) */\n/*         *new_file_id = g_strdup(new_dent->id); */\n\n/* out: */\n/*     if (repo) */\n/*         seaf_repo_unref (repo); */\n/*     if (head_commit) */\n/*         seaf_commit_unref(head_commit); */\n/*     string_list_free (blockids); */\n/*     string_list_free (paths); */\n/*     seaf_dirent_free (new_dent); */\n/*     g_free (root_id); */\n/*     g_free (canon_path); */\n/*     g_free (old_file_id); */\n/*     g_free (fullpath); */\n\n/*     if (ret == 0) { */\n/*         update_repo_size (repo_id); */\n/*     } */\n\n/*     return ret; */\n/* } */\n\n/* split filename into base and extension */\nstatic void\nfilename_splitext (const char *filename,\n                   char **base,\n                   char **ext)\n{\n    char *dot = strrchr(filename, '.');\n    if (!dot) {\n        *base = g_strdup(filename);\n        *ext = NULL;\n    } else {\n        *dot = '\\0';\n        *base = g_strdup(filename);\n        *dot = '.';\n\n        *ext = g_strdup(dot);\n    }\n}\n\nstatic char *\nrevert_file_to_root (SeafRepo *repo,\n                     const char *root_id,\n                     SeafDirent *old_dent,\n                     gboolean *skipped,\n                     GError **error)\n{\n    SeafDir *dir = NULL;\n    SeafDirent *dent = NULL, *newdent = NULL;\n    char *basename = NULL, *ext = NULL;\n    char new_file_name[SEAF_PATH_MAX];\n    char *new_root_id = NULL;\n    int i = 1;\n    GList *p;\n\n    *skipped = FALSE;\n\n    dir = seaf_fs_manager_get_seafdir_by_path (seaf->fs_mgr,\n                                               repo->store_id, repo->version,\n                                               root_id,\n                                               \"/\", error);\n    if 
(*error) {\n        return NULL;\n    }\n\n    snprintf (new_file_name, sizeof(new_file_name), \"%s\", old_dent->name);\n\n    filename_splitext(old_dent->name, &basename, &ext);\n    for (;;) {\n        for (p = dir->entries; p; p = p->next) {\n            dent = p->data;\n            if (strcmp(dent->name, new_file_name) != 0)\n                continue;\n\n            if (S_ISREG(dent->mode)) {\n                /* same named file */\n                if (strcmp(dent->id, old_dent->id) == 0) {\n                    *skipped = TRUE;\n                    goto out;\n                } else {\n                    /* rename and retry */\n                    snprintf (new_file_name, sizeof(new_file_name), \"%s (%d)%s\",\n                              basename, i++, ext);\n                    break;\n                }\n                \n            } else if (S_ISDIR(dent->mode)) {\n                /* rename and retry */\n                snprintf (new_file_name, sizeof(new_file_name), \"%s (%d)%s\",\n                          basename, i++, ext);\n                break;\n            }\n        }\n\n        if (p == NULL)\n            break;\n    }\n\n    newdent = seaf_dirent_new (old_dent->version,\n                               old_dent->id, STD_FILE_MODE, new_file_name,\n                               old_dent->mtime, old_dent->modifier, old_dent->size);\n    new_root_id = do_post_file (repo, root_id, \"/\", newdent);\n\nout:\n    if (dir)\n        seaf_dir_free (dir);\n\n    g_free (basename);\n    g_free (ext);\n    seaf_dirent_free (newdent);\n\n    return new_root_id;\n}\n\nstatic char *\nrevert_file_to_parent_dir (SeafRepo *repo,\n                           const char *root_id,\n                           const char *parent_dir,\n                           SeafDirent *old_dent,\n                           gboolean *skipped,\n                           GError **error)\n{\n    SeafDir *dir = NULL;\n    SeafDirent *dent = NULL, *newdent = NULL;\n    char *basename = 
NULL, *ext = NULL;\n    char new_file_name[SEAF_PATH_MAX];\n    char *new_root_id = NULL;\n    gboolean is_overwrite = FALSE;\n    int i = 1;\n    GList *p;\n    \n    *skipped = FALSE;\n\n    dir = seaf_fs_manager_get_seafdir_by_path (seaf->fs_mgr,\n                                               repo->store_id, repo->version,\n                                               root_id,\n                                               parent_dir, error);\n    if (*error) {\n        return NULL;\n    }\n\n    snprintf (new_file_name, sizeof(new_file_name), \"%s\", old_dent->name);\n    filename_splitext(old_dent->name, &basename, &ext);\n    while(TRUE) {\n        for (p = dir->entries; p; p = p->next) {\n            dent = p->data;\n            if (strcmp(dent->name, new_file_name) != 0)\n                continue;\n\n            if (S_ISREG(dent->mode)) {\n                /* same named file */\n                if (strcmp(dent->id, old_dent->id) == 0) {\n                    *skipped = TRUE;\n                    goto out;\n                } else {\n                    /* same name, different id: just overwrite */\n                    is_overwrite = TRUE;\n                    goto do_revert;\n                }\n                \n            } else if (S_ISDIR(dent->mode)) {\n                /* rename and retry */\n                snprintf (new_file_name, sizeof(new_file_name), \"%s (%d)%s\",\n                          basename, i++, ext);\n                break;\n            }\n        }\n\n        if (p == NULL)\n            break;\n    }\n\ndo_revert:    \n    newdent = seaf_dirent_new (old_dent->version,\n                               old_dent->id, STD_FILE_MODE, new_file_name,\n                               old_dent->mtime, old_dent->modifier, old_dent->size);\n    if (is_overwrite) {\n        new_root_id = do_put_file (repo,\n                                   root_id, parent_dir, newdent);\n    } else {\n        new_root_id = do_post_file (repo,\n                   
                 root_id, parent_dir, newdent);\n    }\n\nout:\n    if (dir)\n        seaf_dir_free (dir);\n\n    g_free (basename);\n    g_free (ext);\n    seaf_dirent_free (newdent);\n\n    return new_root_id;\n}\n\nstatic gboolean\ndetect_path_exist (SeafRepo *repo,\n                   const char *root_id,\n                   const char *path,\n                   GError **error)\n{\n    SeafDir *dir;\n\n    dir = seaf_fs_manager_get_seafdir_by_path (seaf->fs_mgr,\n                                               repo->store_id, repo->version,\n                                               root_id, path, error);\n    if (*error) {\n        if (g_error_matches(*error, SEAFILE_DOMAIN, SEAF_ERR_PATH_NO_EXIST)) {\n            /* path does not exist */\n            g_clear_error(error);\n            return FALSE;\n        } else {\n            /* Other error */\n            return FALSE;\n        }\n    }\n\n    seaf_dir_free(dir);\n    return TRUE;\n}\n\nint\nseaf_repo_manager_revert_file (SeafRepoManager *mgr,\n                               const char *repo_id,\n                               const char *old_commit_id,\n                               const char *file_path,\n                               const char *user,\n                               GError **error)\n{\n    SeafRepo *repo = NULL;\n    SeafCommit *head_commit = NULL, *old_commit = NULL;\n    char *parent_dir = NULL, *filename = NULL;\n    SeafDirent *old_dent = NULL;\n    char *canon_path = NULL, *root_id = NULL;\n    char buf[SEAF_PATH_MAX];\n    char time_str[512];\n    gboolean parent_dir_exist = FALSE;\n    gboolean skipped = FALSE;\n    int ret = 0;\n\n    GET_REPO_OR_FAIL(repo, repo_id);\n    GET_COMMIT_OR_FAIL(head_commit, repo->id, repo->version, repo->head->commit_id);\n\n    /* If old_commit_id is head commit, do nothing. 
 */
    if (strcmp(repo->head->commit_id, old_commit_id) == 0) {
        g_debug ("[revert file] commit is head, do nothing\n");
        goto out;
    }

    if (!old_commit) {
        GET_COMMIT_OR_FAIL(old_commit, repo->id, repo->version, old_commit_id);
        /* Refuse commits belonging to a different repo. */
        if (strcmp(old_commit->repo_id, repo_id) != 0) {
            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_COMMIT,
                         "bad commit id");
            ret = -1;
            goto out;
        }
    }

    if (!canon_path) {
        canon_path = get_canonical_path (file_path);
        /* A trailing '/' means the target is a directory, not a file. */
        if (canon_path[strlen(canon_path) -1 ] == '/') {
            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_COMMIT,
                         "bad target file path");
            ret = -1;
            goto out;
        }

        parent_dir  = g_path_get_dirname(canon_path);
        filename = g_path_get_basename(canon_path);

        old_dent = get_dirent_by_path (repo, old_commit->root_id,
                                       parent_dir, filename, error);
        /* NOTE(review): old_dent is checked before *error, so a lookup
         * failure that set *error returns via this branch without the
         * "internal error" rewrite below — confirm this is intended. */
        if (!old_dent || S_ISDIR(old_dent->mode)) {
            ret = -1;
            goto out;
        }
        if (*error) {
            seaf_warning ("[revert file] error: %s\n", (*error)->message);
            g_clear_error (error);
            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                         "internal error");
            ret = -1;
            goto out;
        }
    }

    parent_dir_exist = detect_path_exist (repo,
                                          head_commit->root_id,
                                          parent_dir, error);
    if (*error) {
        seaf_warning ("[revert file] error: %s\n", (*error)->message);
        g_clear_error (error);
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                     "internal error");
        ret = -1;
        goto out;
    }

    if (!parent_dir_exist) {
        /* When parent dir does not exist, create the parent dir first. */
        const char *relative_path = parent_dir;
        if (parent_dir[0] == '/') {
            relative_path = parent_dir + 1;
        }
        seaf_repo_manager_mkdir_with_parents (mgr, repo_id, "/", relative_path, user, error);
        if (*error) {
            seaf_warning ("[revert file] failed to create parent dir: %s\n", (*error)->message);
            g_clear_error (error);
            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                         "internal error");
            ret = -1;
            goto out;
        }

        // Get head commit again, after mkdir with parents.
        seaf_repo_unref (repo);
        seaf_commit_unref (head_commit);
        GET_REPO_OR_FAIL(repo, repo_id);
        GET_COMMIT_OR_FAIL(head_commit, repo->id, repo->version, repo->head->commit_id);
        root_id = revert_file_to_parent_dir (repo,
                                             head_commit->root_id, parent_dir,
                                             old_dent,
                                             &skipped, error);
    } else {
        /* Same call as above; only the head refresh differs between
         * the two branches. */
        root_id = revert_file_to_parent_dir (repo,
                                             head_commit->root_id, parent_dir,
                                             old_dent,
                                             &skipped, error);
    }

    if (*error) {
        seaf_warning ("[revert file] error: %s\n", (*error)->message);
        g_clear_error (error);
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                     "internal error");
        ret = -1;
        goto out;
    }

    /* An identical file already exists at the target: nothing to commit. */
    if (skipped) {
        goto out;
    }

    if (!root_id) {
        seaf_warning ("[revert file] Failed to revert file.\n");
        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,
                     "Failed to revert file");
        ret = -1;
        goto out;
    }

    /* Commit. */
#ifndef WIN32
    strftime (time_str, sizeof(time_str), "%F %T",
              localtime((time_t *)(&old_commit->ctime)));
#else
    /* NOTE(review): casting &old_commit->ctime (gint64*) to time_t*
     * assumes time_t is 64-bit — confirm on 32-bit builds. */
    strftime (time_str, sizeof(time_str), "%Y-%m-%d %H:%M:%S",
              localtime((time_t *)(&old_commit->ctime)));
#endif
    snprintf(buf, SEAF_PATH_MAX, "Reverted file \"%s\" to status at %s", filename, time_str);
    if (gen_new_commit (repo_id, head_commit, root_id,
                        user, buf, NULL, TRUE, FALSE, NULL, error) < 0) {
        ret = -1;
        goto out;
    }

    seaf_repo_manager_merge_virtual_repo (mgr, repo_id, NULL);

out:
    if (repo)
        seaf_repo_unref (repo);
    if (head_commit)
        seaf_commit_unref (head_commit);
    if (old_commit)
        seaf_commit_unref (old_commit);

    g_free (root_id);
    g_free (parent_dir);
    g_free (filename);

    g_free (canon_path);
    seaf_dirent_free (old_dent);

    if (ret == 0) {
        update_repo_size (repo_id);
    }

    return ret;
}

/* Re-add the directory `old_dent` under `parent_dir`, renaming it to
 * "name (N)" while a different same-named entry exists.  *skipped is
 * set when an identical dir is already present.  Returns the new root
 * id (caller frees) or NULL on error. */
static char *
revert_dir (SeafRepo *repo,
            const char *root_id,
            const char *parent_dir,
            SeafDirent *old_dent,
            gboolean *skipped,
            GError **error)
{
    SeafDir *dir = NULL;
    SeafDirent *dent = NULL, *newdent = NULL;
    char new_dir_name[SEAF_PATH_MAX];
    char *new_root_id = NULL;
    int i = 1;
    GList *p;

    *skipped = FALSE;

    dir = seaf_fs_manager_get_seafdir_by_path (seaf->fs_mgr,
                                               repo->store_id, repo->version,
                                               root_id,
                                               parent_dir, error);
    if (*error) {
        return NULL;
    }

    snprintf (new_dir_name, sizeof(new_dir_name), "%s", old_dent->name);

    for (;;) {
        for (p = dir->entries; p; p = p->next) {
            dent = p->data;
            if (strcmp(dent->name, 
new_dir_name) != 0)\n                continue;\n\n            /* the same dir */\n            if (S_ISDIR(dent->mode) && strcmp(dent->id, old_dent->id) == 0) {\n                *skipped = TRUE;\n                goto out;\n            } else {\n                /* rename and retry */\n                snprintf (new_dir_name, sizeof(new_dir_name), \"%s (%d)\",\n                          old_dent->name, i++);\n                break;\n            }\n        }\n\n        if (p == NULL)\n            break;\n    }\n\n    newdent = seaf_dirent_new (old_dent->version,\n                               old_dent->id, S_IFDIR, new_dir_name,\n                               old_dent->mtime, NULL, -1);\n    new_root_id = do_post_file (repo, root_id, parent_dir, newdent);\n\nout:\n    if (dir)\n        seaf_dir_free (dir);\n\n    seaf_dirent_free (newdent);\n\n    return new_root_id;\n}\n\nint\nseaf_repo_manager_revert_dir (SeafRepoManager *mgr,\n                              const char *repo_id,\n                              const char *old_commit_id,\n                              const char *dir_path,\n                              const char *user,\n                              GError **error)\n{\n    SeafRepo *repo = NULL;\n    SeafCommit *head_commit = NULL, *old_commit = NULL;\n    char *parent_dir = NULL, *dirname = NULL;\n    SeafDirent *old_dent = NULL;\n    char *canon_path = NULL, *root_id = NULL;\n    char buf[SEAF_PATH_MAX];\n    gboolean parent_dir_exist = FALSE;\n    gboolean skipped = FALSE;\n    int ret = 0;\n\n    GET_REPO_OR_FAIL(repo, repo_id);\n    GET_COMMIT_OR_FAIL(head_commit, repo->id, repo->version, repo->head->commit_id);\n\n    /* If old_commit_id is head commit, do nothing. 
*/\n    if (strcmp(repo->head->commit_id, old_commit_id) == 0) {\n        g_debug (\"[revert dir] commit is head, do nothing\\n\");\n        goto out;\n    }\n\n    if (!old_commit) {\n        GET_COMMIT_OR_FAIL(old_commit, repo->id, repo->version, old_commit_id);\n        if (strcmp(old_commit->repo_id, repo_id) != 0) {\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_COMMIT,\n                         \"bad commit id\");\n            ret = -1;\n            goto out;\n        }\n    }\n\n    if (!canon_path) {\n        canon_path = get_canonical_path (dir_path);\n\n        parent_dir  = g_path_get_dirname(canon_path);\n        dirname = g_path_get_basename(canon_path);\n\n        old_dent = get_dirent_by_path (repo, old_commit->root_id,\n                                       parent_dir, dirname, error);\n        if (!old_dent || S_ISREG(old_dent->mode)) {\n            ret = -1;\n            goto out;\n        }\n        if (*error) {\n            seaf_warning (\"[revert dir] error: %s\\n\", (*error)->message);\n            g_clear_error (error);\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                         \"internal error\");\n            ret = -1;\n            goto out;\n        }\n    }\n\n    parent_dir_exist = detect_path_exist (repo,\n                                          head_commit->root_id,\n                                          parent_dir, error);\n    if (*error) {\n        seaf_warning (\"[revert dir] error: %s\\n\", (*error)->message);\n        g_clear_error (error);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"internal error\");\n        ret = -1;\n        goto out;\n    }\n    \n    if (!parent_dir_exist) {\n        /* When parent dir does not exist, create the parent dir first. 
*/\n        const char *relative_path = parent_dir;\n        if (parent_dir[0] == '/') {\n            relative_path = parent_dir + 1;\n        }\n        seaf_repo_manager_mkdir_with_parents (mgr, repo_id, \"/\", relative_path, user, error);\n        if (*error) {\n            seaf_warning (\"[revert file] failed to create parent dir: %s\\n\", (*error)->message);\n            g_clear_error (error);\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                         \"internal error\");\n            ret = -1;\n            goto out;\n        }\n\n        // Get head commit again, after mkdir with parents.\n        seaf_repo_unref (repo);\n        seaf_commit_unref (head_commit);\n        GET_REPO_OR_FAIL(repo, repo_id);\n        GET_COMMIT_OR_FAIL(head_commit, repo->id, repo->version, repo->head->commit_id);\n\n        root_id = revert_dir (repo,\n                              head_commit->root_id,\n                              parent_dir,\n                              old_dent,\n                              &skipped, error);\n    } else {\n        root_id = revert_dir (repo,\n                              head_commit->root_id,\n                              parent_dir,\n                              old_dent,\n                              &skipped, error);\n    }\n\n    if (*error) {\n        seaf_warning (\"[revert dir] error: %s\\n\", (*error)->message);\n        g_clear_error (error);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"internal error\");\n        ret = -1;\n        goto out;\n    }\n\n    if (skipped) {\n        goto out;\n    }\n    \n    if (!root_id) {\n        seaf_warning (\"[revert dir] Failed to revert dir.\\n\");\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to revert dir\");\n        ret = -1;\n        goto out;\n    }\n\n    /* Commit. 
*/\n    snprintf(buf, SEAF_PATH_MAX, \"Recovered deleted directory \\\"%s\\\"\", dirname);\n    if (gen_new_commit (repo_id, head_commit, root_id,\n                        user, buf, NULL, TRUE, FALSE, NULL, error) < 0) {\n        ret = -1;\n        goto out;\n    }\n\n    seaf_repo_manager_merge_virtual_repo (mgr, repo_id, NULL);\n\nout:\n    if (repo)\n        seaf_repo_unref (repo);\n    if (head_commit)\n        seaf_commit_unref (head_commit);\n    if (old_commit)\n        seaf_commit_unref (old_commit);\n\n    g_free (root_id);\n    g_free (parent_dir);\n    g_free (dirname);\n\n    g_free (canon_path);\n    seaf_dirent_free (old_dent);\n\n#define REVERT_TO_ROOT              0x1\n    if (ret == 0) {\n        update_repo_size (repo_id);\n    }\n\n    return ret;\n}\n\ntypedef struct CollectRevisionParam CollectRevisionParam;\n\nstruct CollectRevisionParam {\n    SeafRepo *repo;\n    const char *path;\n    GList *wanted_commits;\n    GList *file_id_list;\n    GList *file_size_list;\n    int n_commits;\n    GHashTable *file_info_cache;\n    \n    /* > 0: keep a period of history;\n     * == 0: N/A\n     * < 0: keep all history data.\n     */\n    gint64 truncate_time;\n    gboolean got_latest;\n    gboolean got_second;\n    gboolean not_found_file;\n\n    GError **error;\n};\n\ntypedef struct FileInfo {\n    gint64 file_size;\n    char *file_id;\n    GList *dir_ids;\n} FileInfo;\n\nstatic void\nfree_file_info (gpointer info)\n{\n    if (!info)\n        return;\n\n    FileInfo *file_info = info;\n    g_free (file_info->file_id);\n    g_list_free_full (file_info->dir_ids, g_free);\n    g_free (file_info);\n}\n\n// compare current commit dir_id with pre commit\n// if dir_id doesn't change, it means subdir doesn't change, append all sub_dir ids of prev to current\n// that is it is no need to traverse all sub dir, if root doesn't change\nstatic gboolean\ncompare_or_add_id (GList *dir_ids,\n                   GList **cur_dir_ids,\n                   const char 
*dir_id)\n{\n    gboolean ret = FALSE;\n    GList *tmp = dir_ids;\n\n    if (tmp == NULL ||\n        strcmp ((char *)tmp->data, dir_id) != 0) {\n        *cur_dir_ids = g_list_append (*cur_dir_ids, g_strdup (dir_id));\n    } else {\n        // file doesn't changed, append all dir ids to this commit cache\n        while (tmp) {\n            *cur_dir_ids = g_list_append (*cur_dir_ids,\n                                          g_strdup ((char *)tmp->data));\n            tmp = tmp->next;\n        }\n        ret = TRUE;\n    }\n\n    return ret;\n}\n\n// dir_ids: all dir_ids in prev commit, in the order of fs tree\n// cur_dir_ids: all dir_ids in current commit\n// if no error and returned seafdir is NULL, then it means\n// searched dir doesn't change in pre and current commit\nstatic SeafDir*\nget_seafdir_by_path (const char *repo_id,\n                     int version,\n                     const char *root_id,\n                     const char *path,\n                     GList *dir_ids,\n                     GList **cur_dir_ids,\n                     GError **error)\n{\n    SeafDir *dir = NULL;\n    SeafDirent *dent;\n    const char *dir_id = root_id;\n    char *name, *saveptr;\n    char *tmp_path = NULL;\n    GList *tmp = dir_ids;\n\n    dir = seaf_fs_manager_get_seafdir (seaf->fs_mgr, repo_id, version, dir_id);\n    if (!dir) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_DIR_MISSING, \"directory is missing\");\n        goto out;\n    }\n\n    if (compare_or_add_id (tmp, cur_dir_ids, dir_id)) {\n        seaf_dir_free (dir);\n        dir = NULL;\n        goto out;\n    } else if (tmp) {\n        tmp = tmp->next;\n    }\n\n    if (strcmp (path, \".\") == 0 ||\n        strcmp (path, \"/\") == 0) {\n        goto out;\n    } else {\n        tmp_path = g_strdup (path);\n    }\n\n    name = strtok_r (tmp_path, \"/\", &saveptr);\n    while (name != NULL) {\n        GList *l;\n        for (l = dir->entries; l != NULL; l = l->next) {\n            dent = l->data;\n\n   
         if (strcmp(dent->name, name) == 0 && S_ISDIR(dent->mode)) {\n                dir_id = dent->id;\n                break;\n            }\n        }\n\n        if (!l) {\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_PATH_NO_EXIST,\n                         \"Path does not exists %s\", path);\n            seaf_dir_free (dir);\n            dir = NULL;\n            break;\n        }\n\n        if (compare_or_add_id (tmp, cur_dir_ids, dir_id)) {\n            seaf_dir_free (dir);\n            dir = NULL;\n            goto out;\n        } else if (tmp) {\n            tmp = tmp->next;\n        }\n\n        SeafDir *prev = dir;\n        dir = seaf_fs_manager_get_seafdir (seaf->fs_mgr, repo_id, version, dir_id);\n        seaf_dir_free (prev);\n\n        if (!dir) {\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_DIR_MISSING,\n                         \"directory is missing\");\n            break;\n        }\n\n        name = strtok_r (NULL, \"/\", &saveptr);\n    }\n\nout:\n    g_free (tmp_path);\n    return dir;\n}\n\n/*\n * Return NULL if file is not found, error is still NULL;\n * If we have IO errors, error is set.\n */\nstatic FileInfo*\nget_file_info (SeafRepo *repo,\n               SeafCommit *commit,\n               const char *path,\n               GHashTable *file_info_cache,\n               FileInfo *last_info,\n               GError **error)\n{\n    SeafDir *dir = NULL;\n    SeafDirent *dirent = NULL;\n    FileInfo *file_info = NULL;\n    GList *tmp;\n\n    file_info = g_hash_table_lookup (file_info_cache, commit->commit_id);\n    if (file_info)\n        return file_info;\n\n    char *dir_name = g_path_get_dirname (path);\n    char *file_name = g_path_get_basename (path);\n    GList *cur_dir_ids = NULL;\n    GList *dir_ids = last_info ? 
last_info->dir_ids : NULL;\n\n    dir = get_seafdir_by_path (repo->store_id, repo->version,\n                               commit->root_id, dir_name, dir_ids,\n                               &cur_dir_ids, error);\n    if (*error) {\n        if ((*error)->code == SEAF_ERR_PATH_NO_EXIST)\n            g_clear_error (error);\n        goto out;\n    }\n\n    if (!dir) {\n        // if no error and return is null from get_seafdir_by_path, it means dir doesn't\n        // change in pre and current commit, so the last_info (file info of pre commit)\n        // is also the current file info\n        file_info = g_new0 (FileInfo, 1);\n        file_info->file_id = g_strdup (last_info->file_id);\n        file_info->dir_ids = cur_dir_ids;\n        file_info->file_size = last_info->file_size;\n        g_hash_table_insert (file_info_cache, g_strdup (commit->commit_id),\n                             file_info);\n    } else {\n        for (tmp = dir->entries; tmp; tmp = tmp->next) {\n            dirent = tmp->data;\n            if (strcmp (file_name, dirent->name) == 0 &&\n                S_ISREG (dirent->mode)) {\n                break;\n            }\n        }\n        if (tmp) {\n            // from parent dir find the file, cache file info for the next compare\n            file_info = g_new0 (FileInfo, 1);\n            file_info->file_id = g_strdup (dirent->id);\n            file_info->dir_ids = cur_dir_ids;\n            if (repo->version > 0) {\n                file_info->file_size = dirent->size;\n            } else {\n                file_info->file_size = seaf_fs_manager_get_file_size (seaf->fs_mgr,\n                                                                      repo->store_id,\n                                                                      repo->version,\n                                                                      dirent->id);\n            }\n            g_hash_table_insert (file_info_cache, g_strdup (commit->commit_id),\n                          
       file_info);\n        }\n    }\n\nout:\n    if (dir)\n        seaf_dir_free (dir);\n    if (!file_info) {\n        g_list_free_full (cur_dir_ids, g_free);\n    }\n    g_free (file_name);\n    g_free (dir_name);\n\n    return file_info;\n}\n\nstatic void\nadd_revision_info (CollectRevisionParam *data,\n                   SeafCommit *commit, const char *file_id, gint64 file_size)\n{\n    seaf_commit_ref (commit);\n    data->wanted_commits = g_list_prepend (data->wanted_commits, commit);\n    data->file_id_list = g_list_prepend (data->file_id_list, g_strdup(file_id));\n    gint64 *size = g_malloc(sizeof(gint64));\n    *size = file_size;\n    data->file_size_list = g_list_prepend (data->file_size_list, size);\n    ++(data->n_commits);\n}\n\nstatic gboolean\ncollect_file_revisions (SeafCommit *commit, void *vdata, gboolean *stop)\n{\n    CollectRevisionParam *data = vdata;\n    SeafRepo *repo = data->repo;\n    const char *path = data->path;\n    GError **error = data->error;\n    GHashTable *file_info_cache = data->file_info_cache;\n    FileInfo *file_info = NULL;\n    FileInfo *parent1_info = NULL;\n    FileInfo *parent2_info = NULL;\n\n    SeafCommit *parent_commit = NULL;\n    SeafCommit *parent_commit2 = NULL;\n\n    gboolean ret = TRUE;\n\n    /* At least find the latest revision. 
*/\n    if (data->got_latest && data->truncate_time == 0) {\n        *stop = TRUE;\n        return TRUE;\n    }\n\n    if (data->got_latest &&\n        data->truncate_time > 0 &&\n        (gint64)(commit->ctime) < data->truncate_time &&\n        data->got_second)\n    {\n        *stop = TRUE;\n        data->not_found_file = TRUE;\n        return TRUE;\n    }\n\n    g_clear_error (error);\n\n    file_info = get_file_info (data->repo, commit, path,\n                               file_info_cache, NULL, error);\n    if (*error) {\n        seaf_warning (\"Error when finding %s under %s:%s\\n\",\n                      path, data->repo->id, commit->commit_id);\n        ret = FALSE;\n        goto out;\n    }\n\n    if (!file_info) {\n        /* Target file is not present in this commit.\n         * Stop traversing after finding the initial version.\n         * Deleted files with the same path are not included in history.\n         */\n        *stop = TRUE;\n        data->not_found_file = TRUE;\n        goto out;\n    }\n\n    if (!commit->parent_id) {\n        /* Initial commit */\n        add_revision_info (data, commit, file_info->file_id, file_info->file_size);\n        goto out;\n    }\n\n    parent_commit = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                                                    repo->id, repo->version,\n                                                    commit->parent_id);\n    if (!parent_commit) {\n        seaf_warning (\"Failed to get commit %s:%s\\n\", repo->id, commit->parent_id);\n        ret = FALSE;\n        goto out;\n    }\n\n    parent1_info = get_file_info (data->repo, parent_commit, path,\n                                  file_info_cache, file_info, error);\n    if (*error) {\n        seaf_warning (\"Error when finding %s under %s:%s\\n\",\n                      path, data->repo->id, parent_commit->commit_id);\n        ret = FALSE;\n        goto out;\n    }\n\n    if (parent1_info &&\n        g_strcmp0 
(parent1_info->file_id, file_info->file_id) == 0) {\n        /* This commit does not modify the target file */\n        goto out;\n    }\n\n    /* In case of a merge, the second parent also need compare */\n    if (commit->second_parent_id) {\n        parent_commit2 = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                                                         repo->id, repo->version,\n                                                         commit->second_parent_id);\n        if (!parent_commit2) {\n            seaf_warning (\"Failed to get commit %s:%s\\n\",\n                          repo->id, commit->second_parent_id);\n            ret = FALSE;\n            goto out;\n        }\n\n        parent2_info = get_file_info (data->repo, parent_commit2, path,\n                                      file_info_cache, file_info, error);\n        if (*error) {\n            seaf_warning (\"Error when finding %s under %s:%s\\n\",\n                          path, data->repo->id, parent_commit2->commit_id);\n            ret = FALSE;\n            goto out;\n        }\n\n        if (parent2_info &&\n            g_strcmp0 (parent2_info->file_id, file_info->file_id) == 0) {\n            /* This commit does not modify the target file */\n            goto out;\n        }\n    }\n\n    if (!data->got_latest) {\n        data->got_latest = TRUE;\n    } else {\n        if (!data->got_second)\n            data->got_second = TRUE;\n    }\n    add_revision_info (data, commit, file_info->file_id, file_info->file_size);\n\nout:\n    if (parent_commit) seaf_commit_unref (parent_commit);\n    if (parent_commit2) seaf_commit_unref (parent_commit2);\n\n    g_hash_table_remove (file_info_cache, commit->commit_id);\n\n    return ret;\n}\n\nstatic gboolean\npath_exists_in_commit (SeafRepo *repo, const char *commit_id, const char *path)\n{\n    SeafCommit *c = NULL;\n    char *obj_id;\n    guint32 mode;\n\n    c = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                       
                 repo->id, repo->version,\n                                        commit_id);\n    if (!c) {\n        seaf_warning (\"Failed to get commit %s:%.8s.\\n\", repo->id, commit_id);\n        return FALSE;\n    }\n    obj_id = seaf_fs_manager_path_to_obj_id (seaf->fs_mgr,\n                                             repo->store_id,\n                                             repo->version,\n                                             c->root_id,\n                                             path,\n                                             &mode,\n                                             NULL);\n    seaf_commit_unref (c);\n    if (!obj_id)\n        return FALSE;\n    g_free (obj_id);\n    return TRUE;\n}\n\nstatic gboolean\ndetect_rename_revision (SeafRepo *repo,\n                        SeafCommit *commit,\n                        const char *path,\n                        char **parent_id,\n                        char **old_path)\n{\n    GList *diff_res = NULL;\n    SeafCommit *p1 = NULL;\n    int rc;\n    gboolean is_renamed = FALSE;\n\n    while (*path == '/' && *path != 0)\n        ++path;\n\n    if (!commit->second_parent_id) {\n        p1 = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                                             repo->id, repo->version,\n                                             commit->parent_id);\n        if (!p1) {\n            seaf_warning (\"Failed to get commit %s:%.8s.\\n\",\n                          repo->id, commit->parent_id);\n            return FALSE;\n        }\n        /* Don't fold diff results for directories. 
We need to know a file was\n         * renamed when its parent folder was renamed.\n         */\n        rc = diff_commits (p1, commit, &diff_res, FALSE);\n        seaf_commit_unref (p1);\n        if (rc < 0) {\n            seaf_warning (\"Failed to diff.\\n\");\n            return FALSE;\n        }\n    } else {\n        rc = diff_merge (commit, &diff_res, FALSE);\n        if (rc < 0) {\n            seaf_warning (\"Failed to diff merge.\\n\");\n            return FALSE;\n        }\n    }\n\n    GList *ptr;\n    DiffEntry *de;\n    for (ptr = diff_res; ptr; ptr = ptr->next) {\n        de = ptr->data;\n        if (de->status == DIFF_STATUS_RENAMED && strcmp (de->new_name, path) == 0) {\n            *old_path = g_strdup(de->name);\n            is_renamed = TRUE;\n            break;\n        }\n    }\n    for (ptr = diff_res; ptr; ptr = ptr->next)\n        diff_entry_free ((DiffEntry *)ptr->data);\n    g_list_free (diff_res);\n\n    if (!is_renamed)\n        return FALSE;\n\n    /* Determine parent commit containing the old path. 
*/\n    if (!commit->second_parent_id)\n        *parent_id = g_strdup(commit->parent_id);\n    else {\n        if (path_exists_in_commit (repo, commit->parent_id, *old_path))\n            *parent_id = g_strdup(commit->parent_id);\n        else if (path_exists_in_commit (repo, commit->second_parent_id, *old_path))\n            *parent_id = g_strdup(commit->second_parent_id);\n        else {\n            g_free (*old_path);\n            *old_path = NULL;\n            return FALSE;\n        }\n    }\n\n    return TRUE;\n}\n\nstatic SeafileCommit *\nconvert_to_seafile_commit (SeafCommit *c)\n{\n    SeafileCommit *commit = seafile_commit_new ();\n    g_object_set (commit,\n                  \"id\", c->commit_id,\n                  \"creator_name\", c->creator_name,\n                  \"creator\", c->creator_id,\n                  \"desc\", c->desc,\n                  \"ctime\", c->ctime,\n                  \"repo_id\", c->repo_id,\n                  \"root_id\", c->root_id,\n                  \"parent_id\", c->parent_id,\n                  \"second_parent_id\", c->second_parent_id,\n                  \"version\", c->version,\n                  \"new_merge\", c->new_merge,\n                  \"conflict\", c->conflict,\n                  \"device_name\", c->device_name,\n                  \"client_version\", c->client_version,\n                  NULL);\n    return commit;\n}\n\nstatic GList *\nconvert_rpc_commit_list (GList *commit_list,\n                         GList *file_id_list,\n                         GList *file_size_list,\n                         gboolean is_renamed,\n                         const char *renamed_old_path)\n{\n    GList *ret = NULL;\n    GList *ptr1, *ptr2, *ptr3;\n    SeafCommit *c;\n    char *file_id;\n    gint64 *file_size;\n    SeafileCommit *commit;\n\n    for (ptr1 = commit_list, ptr2 = file_id_list, ptr3 = file_size_list;\n         ptr1 && ptr2 && ptr3;\n         ptr1 = ptr1->next, ptr2 = ptr2->next, ptr3 = ptr3->next) {\n        c = 
ptr1->data;\n        file_id = ptr2->data;\n        file_size = ptr3->data;\n        commit = convert_to_seafile_commit (c);\n        g_object_set (commit, \"rev_file_id\", file_id, \"rev_file_size\", *file_size,\n                      NULL);\n        if (ptr1->next == NULL && is_renamed)\n            g_object_set (commit, \"rev_renamed_old_path\", renamed_old_path, NULL);\n        ret = g_list_prepend (ret, commit);\n    }\n\n    ret = g_list_reverse (ret);\n    return ret;\n}\n\nGList *\nseaf_repo_manager_list_file_revisions (SeafRepoManager *mgr,\n                                       const char *repo_id,\n                                       const char *start_commit_id,\n                                       const char *path,\n                                       int limit,\n                                       gboolean got_latest,\n                                       gboolean got_second,\n                                       GError **error)\n{\n    SeafRepo *repo = NULL;\n    GList *commit_list = NULL, *file_id_list = NULL, *file_size_list = NULL;\n    GList *ret = NULL, *ptr;\n    CollectRevisionParam data = {0};\n    SeafCommit *last_commit = NULL;\n    const char *head_id;\n    gboolean is_renamed = FALSE;\n    char *parent_id = NULL, *old_path = NULL;\n    char *next_start_commit= NULL;\n\n    repo = seaf_repo_manager_get_repo (mgr, repo_id);\n    if (!repo) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"No such repo %s\", repo_id);\n        goto out;\n    }\n\n    data.repo = repo;\n\n    if (!start_commit_id)\n        head_id = repo->head->commit_id;\n    else\n        head_id = start_commit_id;\n\n    data.path = path;\n    data.error = error;\n\n    data.truncate_time = seaf_repo_manager_get_repo_truncate_time (mgr, repo_id);\n\n    data.wanted_commits = NULL;\n    data.file_id_list = NULL;\n    data.file_size_list = NULL;\n    data.got_latest = got_latest;\n    data.got_second = got_second;\n    
data.not_found_file = FALSE;\n\n    /* A hash table to cache caculated file info of <path> in <commit> */\n    data.file_info_cache = g_hash_table_new_full (g_str_hash, g_str_equal,\n                                                  g_free, free_file_info);\n\n    if (!seaf_commit_manager_traverse_commit_tree_with_limit (seaf->commit_mgr,\n                                                              repo->id,\n                                                              repo->version,\n                                                              head_id,\n                                                              (CommitTraverseFunc)collect_file_revisions,\n                                                              limit, &data, &next_start_commit, TRUE)) {\n        g_clear_error (error);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"failed to traverse commit of repo %s\", repo_id);\n        goto out;\n    }\n\n    if (data.wanted_commits) {\n        last_commit = data.wanted_commits->data;\n        is_renamed = detect_rename_revision (repo,\n                                             last_commit, path, &parent_id, &old_path);\n        if (data.not_found_file && !is_renamed) {   // reached file initial commit.\n            g_free (next_start_commit);\n            next_start_commit = NULL;\n        } else if (is_renamed){    // file renamed.\n            g_free (next_start_commit);\n            next_start_commit = g_strdup (parent_id);\n        }\n        commit_list = g_list_reverse (data.wanted_commits);\n        file_id_list = g_list_reverse (data.file_id_list);\n        file_size_list = g_list_reverse (data.file_size_list);\n\n        char *rename_path = NULL;\n        if (old_path && *old_path != '/')\n            rename_path = g_strconcat (\"/\", old_path, NULL);\n        else\n            rename_path = g_strdup (old_path);\n\n        ret = convert_rpc_commit_list (commit_list, file_id_list, file_size_list,\n 
                                      is_renamed, rename_path);\n        g_free (rename_path);\n    } else {\n        if (data.not_found_file) {\n            g_free (next_start_commit);\n            next_start_commit = NULL;\n        }\n    }\n\n    /* Append one commit that only contains 'next_start_commit' */\n    SeafileCommit *commit = seafile_commit_new ();\n    g_object_set (commit, \"next_start_commit\", next_start_commit, NULL);\n    ret = g_list_append (ret, commit);\n\nout:\n    if (repo)\n        seaf_repo_unref (repo);\n    for (ptr = commit_list; ptr; ptr = ptr->next)\n        seaf_commit_unref ((SeafCommit *)ptr->data);\n    g_list_free (commit_list);\n    string_list_free (file_id_list);\n    for (ptr = file_size_list; ptr; ptr = ptr->next)\n        g_free (ptr->data);\n    g_list_free (file_size_list);\n    if (data.file_info_cache)\n        g_hash_table_destroy (data.file_info_cache);\n    g_free (old_path);\n    g_free (parent_id);\n    g_free (next_start_commit);\n\n    return ret;\n}\n\ntypedef struct CalcFilesLastModifiedParam CalcFilesLastModifiedParam;\n\nstruct CalcFilesLastModifiedParam {\n    SeafRepo *repo;\n    GError **error;\n    const char *parent_dir;\n    GHashTable *last_modified_hash;\n    GHashTable *current_file_id_hash;\n    SeafCommit *current_commit;\n};\n\nstatic gboolean\ncheck_non_existing_files (void *key, void *value, void *vdata)\n{\n    CalcFilesLastModifiedParam *data = vdata;\n    gboolean remove = FALSE;\n    \n    char *file_name = key;\n    gint64 *ctime = g_hash_table_lookup (data->last_modified_hash, file_name);\n    if (!ctime) {\n        /* Impossible */\n        remove = TRUE;\n    } else if (*ctime != data->current_commit->ctime) {\n        /* This file does not exist in this commit. 
So it's last modified in\n         * the previous commit.\n         */\n        remove = TRUE;\n    }\n\n    return remove;\n}\n\nstatic gboolean\ncollect_files_last_modified (SeafCommit *commit, void *vdata, gboolean *stop)\n{\n    CalcFilesLastModifiedParam *data = vdata;\n    GError **error = data->error;\n    SeafDirent *dent = NULL;\n    char *file_id = NULL;\n    SeafDir *dir = NULL;\n    GList *ptr;\n    gboolean ret = TRUE;\n\n    data->current_commit = commit;\n    dir = seaf_fs_manager_get_seafdir_by_path (seaf->fs_mgr,\n                                               data->repo->store_id,\n                                               data->repo->version,\n                                               commit->root_id,\n                                               data->parent_dir,\n                                               error);\n    if (*error) {\n        if (!g_error_matches(*error, SEAFILE_DOMAIN, SEAF_ERR_PATH_NO_EXIST)) {\n            *stop = TRUE;\n            ret = FALSE;\n            goto out;\n        } else {\n            g_clear_error (error);\n        }\n    }\n\n    if (!dir) {\n        /* The directory does not exist in this commit. 
So all files are last\n         * modified in the previous commit;\n         */\n        *stop = TRUE;\n        goto out;\n    }\n\n    for (ptr = dir->entries; ptr; ptr = ptr->next) {\n        dent = ptr->data;\n        file_id = g_hash_table_lookup (data->current_file_id_hash, dent->name);\n        if (file_id) {\n            if (strcmp(file_id, dent->id) != 0) {\n                g_hash_table_remove (data->current_file_id_hash, dent->name);\n            } else {\n                gint64 *ctime = g_new (gint64, 1);\n                *ctime = commit->ctime;\n                g_hash_table_replace (data->last_modified_hash, g_strdup(dent->name), ctime);\n            }\n        }\n\n        if (g_hash_table_size(data->current_file_id_hash) == 0) {\n            *stop = TRUE;\n            goto out;\n        }\n    }\n\n    /* Files not found in the current commit are last modified in the previous\n     * commit */\n    g_hash_table_foreach_remove (data->current_file_id_hash,\n                                 check_non_existing_files, data);\n\n    if (g_hash_table_size(data->current_file_id_hash) == 0) {\n        /* All files under this diretory have been calculated  */\n        *stop = TRUE;\n        goto out;\n    }\n\nout:\n    seaf_dir_free (dir);\n\n    return ret;\n}\n\n/**\n * Give a directory, return the last modification timestamps of all the files\n * under this directory.\n *\n * First we record the current id of every file, then traverse the commit\n * tree. 
Give a commit, for each file, if the file id in that commit is\n * different than its current id, then this file is last modified in the\n * commit previous to that commit.\n */\nGList *\nseaf_repo_manager_calc_files_last_modified (SeafRepoManager *mgr,\n                                            const char *repo_id,\n                                            const char *parent_dir,\n                                            int limit,\n                                            GError **error)\n{\n    SeafRepo *repo = NULL;\n    SeafCommit *head_commit = NULL;\n    SeafDir *dir = NULL;\n    GList *ptr = NULL;\n    SeafDirent *dent = NULL; \n    CalcFilesLastModifiedParam data = {0};\n    GList *ret_list = NULL;\n\n    repo = seaf_repo_manager_get_repo (mgr, repo_id);\n    if (!repo) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"No such repo %s\", repo_id);\n        goto out;\n    }\n\n    head_commit = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                                                  repo->id, repo->version, \n                                                  repo->head->commit_id);\n    if (!head_commit) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to get commit %s\", repo->head->commit_id);\n        goto out;\n    }\n\n    dir = seaf_fs_manager_get_seafdir_by_path (seaf->fs_mgr,\n                                               repo->store_id, repo->version,\n                                               head_commit->root_id,\n                                               parent_dir, error);\n    if (*error || !dir) {\n        goto out;\n    }\n\n    data.repo = repo;\n    \n    /* A hash table of pattern (file_name, current_file_id) */\n    data.current_file_id_hash = g_hash_table_new_full (g_str_hash, g_str_equal,\n                                                       g_free, g_free);\n    /* A (file_name, last_modified) hashtable. 
<last_modified> is a heap\n       allocated gint64\n    */\n    data.last_modified_hash = g_hash_table_new_full (g_str_hash, g_str_equal,\n                                                     g_free, g_free);\n    for (ptr = dir->entries; ptr; ptr = ptr->next) {\n        dent = ptr->data;\n        g_hash_table_insert (data.current_file_id_hash,\n                             g_strdup(dent->name),\n                             g_strdup(dent->id));\n\n        gint64 *ctime = g_new (gint64, 1);\n        *ctime = head_commit->ctime;\n        g_hash_table_insert (data.last_modified_hash,\n                             g_strdup(dent->name), \n                             ctime);\n    }\n\n    if (g_hash_table_size (data.current_file_id_hash) == 0) {\n        /* An empty directory, no need to traverse */\n        goto out;\n    }\n\n    data.parent_dir = parent_dir;\n    data.error = error;\n\n    if (!seaf_commit_manager_traverse_commit_tree_with_limit (seaf->commit_mgr,\n                                                              repo->id, repo->version, \n                                                        repo->head->commit_id,\n                                (CommitTraverseFunc)collect_files_last_modified,\n                                                              limit, &data, NULL, FALSE)) {\n        if (*error)\n            seaf_warning (\"error when traversing commits: %s\\n\", (*error)->message);\n        else\n            seaf_warning (\"error when traversing commits.\\n\");\n        g_clear_error (error);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"failed to traverse commit of repo %s\", repo_id);\n        goto out;\n    }\n\n    GHashTableIter iter;\n    gpointer key, value;\n\n    g_hash_table_iter_init (&iter, data.last_modified_hash);\n    while (g_hash_table_iter_next (&iter, &key, &value)) {\n        SeafileFileLastModifiedInfo *info;\n        gint64 last_modified = *(gint64 *)value;\n        info = 
g_object_new (SEAFILE_TYPE_FILE_LAST_MODIFIED_INFO,\n                             \"file_name\", key,\n                             \"last_modified\", last_modified,\n                             NULL);\n        ret_list = g_list_prepend (ret_list, info);\n    }\n\nout:\n    if (repo)\n        seaf_repo_unref (repo);\n    if (head_commit)\n        seaf_commit_unref(head_commit);\n    if (data.last_modified_hash)\n        g_hash_table_destroy (data.last_modified_hash);\n    if (data.current_file_id_hash)\n        g_hash_table_destroy (data.current_file_id_hash);\n    if (dir)\n        seaf_dir_free (dir);\n\n    return g_list_reverse(ret_list);\n}\n\nint\nseaf_repo_manager_revert_on_server (SeafRepoManager *mgr,\n                                    const char *repo_id,\n                                    const char *commit_id,\n                                    const char *user_name,\n                                    GError **error)\n{\n    SeafRepo *repo;\n    SeafCommit *commit = NULL, *new_commit = NULL;\n    char desc[512];\n    int ret = 0;\n\nretry:\n    repo = seaf_repo_manager_get_repo (mgr, repo_id);\n    if (!repo) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"No such repo\");\n        return -1;\n    }\n\n    commit = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                                             repo->id, repo->version, \n                                             commit_id);\n    if (!commit) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Commit doesn't exist\");\n        ret = -1;\n        goto out;\n    }\n\n#ifndef WIN32\n    strftime (desc, sizeof(desc), \"Reverted repo to status at %F %T.\", \n              localtime((time_t *)(&commit->ctime)));\n#else\n    strftime (desc, sizeof(desc), \"Reverted repo to status at %Y-%m-%d %H:%M:%S.\",\n              localtime((time_t *)(&commit->ctime)));\n#endif\n\n    new_commit = seaf_commit_new (NULL, 
repo->id, commit->root_id,\n                                  user_name, EMPTY_SHA1,\n                                  desc, 0);\n\n    new_commit->parent_id = g_strdup (repo->head->commit_id);\n    seaf_repo_to_commit (repo, new_commit);\n\n    if (seaf_commit_manager_add_commit (seaf->commit_mgr, new_commit) < 0) {\n        ret = -1;\n        goto out;\n    }\n\n    seaf_branch_set_commit (repo->head, new_commit->commit_id);\n    if (seaf_branch_manager_test_and_update_branch (seaf->branch_mgr,\n                                                    repo->head,\n                                                    new_commit->parent_id,\n                                                    FALSE, NULL, NULL, NULL) < 0)\n    {\n        seaf_repo_unref (repo);\n        seaf_commit_unref (commit);\n        seaf_commit_unref (new_commit);\n        repo = NULL;\n        commit = new_commit = NULL;\n        goto retry;\n    }\n\n    seaf_repo_manager_merge_virtual_repo (mgr, repo_id, NULL);\n\nout:\n    if (new_commit)\n        seaf_commit_unref (new_commit);\n    if (commit)\n        seaf_commit_unref (commit);\n    if (repo)\n        seaf_repo_unref (repo);\n\n    if (ret == 0) {\n        update_repo_size (repo_id);\n    }\n\n    return ret;\n}\n\nstatic void\nadd_deleted_entry (SeafRepo *repo,\n                   GHashTable *entries,\n                   SeafDirent *dent,\n                   const char *base,\n                   SeafCommit *child,\n                   SeafCommit *parent)\n{\n    char *path = g_strconcat (base, dent->name, NULL);\n    SeafileDeletedEntry *entry;\n    Seafile *file;\n\n    if (g_hash_table_lookup (entries, path) != NULL) {\n        /* g_debug (\"found dup deleted entry for %s.\\n\", path); */\n        g_free (path);\n        return;\n    }\n\n    /* g_debug (\"Add deleted entry for %s.\\n\", path); */\n\n    entry = g_object_new (SEAFILE_TYPE_DELETED_ENTRY,\n                          \"commit_id\", parent->commit_id,\n                       
   \"obj_id\", dent->id,\n                          \"obj_name\", dent->name,\n                          \"basedir\", base,\n                          \"mode\", dent->mode,\n                          \"delete_time\", child->ctime,\n                          NULL);\n\n    if (S_ISREG(dent->mode)) {\n        file = seaf_fs_manager_get_seafile (seaf->fs_mgr,\n                                            repo->store_id, repo->version,\n                                            dent->id);\n        if (!file) {\n            g_free (path);\n            g_object_unref (entry);\n            return;\n        }\n        g_object_set (entry, \"file_size\", file->file_size, NULL);\n        seafile_unref (file);\n    }\n\n    g_hash_table_insert (entries, path, entry);\n}\n\nstatic int\nfind_deleted_recursive (SeafRepo *repo,\n                        SeafDir *d1,\n                        SeafDir *d2,\n                        const char *base,\n                        SeafCommit *child,\n                        SeafCommit *parent,\n                        GHashTable *entries)\n{\n    GList *p1, *p2;\n    SeafDirent *dent1, *dent2;\n    int res, ret = 0;\n\n    p1 = d1->entries;\n    p2 = d2->entries;\n\n    /* Since dirents are sorted in descending order, we can use merge\n     * algorithm to find out deleted entries.\n     * Deleted entries are those:\n     * 1. exists in d2 but absent in d1.\n     * 2. exists in both d1 and d2 but with different type.\n     */\n\n    while (p1 && p2) {\n        dent1 = p1->data;\n        dent2 = p2->data;\n\n        res = g_strcmp0 (dent1->name, dent2->name);\n        if (res < 0) {\n            /* exists in d2 but absent in d1. */\n            add_deleted_entry (repo, entries, dent2, base, child, parent);\n            p2 = p2->next;\n        } else if (res == 0) {\n            if ((dent1->mode & S_IFMT) != (dent2->mode & S_IFMT)) {\n                /* both exists but with diffent type. 
*/\n                add_deleted_entry (repo, entries, dent2, base, child, parent);\n            } else if (S_ISDIR(dent1->mode) && strcmp(dent1->id, dent2->id) != 0) {\n                SeafDir *n1 = seaf_fs_manager_get_seafdir_sorted (seaf->fs_mgr,\n                                                                  repo->store_id,\n                                                                  repo->version,\n                                                                  dent1->id);\n                if (!n1) {\n                    seaf_warning (\"Failed to find dir %s:%s.\\n\", repo->id, dent1->id);\n                    return -1;\n                }\n\n                SeafDir *n2 = seaf_fs_manager_get_seafdir_sorted (seaf->fs_mgr,\n                                                                  repo->store_id,\n                                                                  repo->version,\n                                                                  dent2->id);\n                if (!n2) {\n                    seaf_warning (\"Failed to find dir %s:%s.\\n\", repo->id, dent2->id);\n                    seaf_dir_free (n1);\n                    return -1;\n                }\n\n                char *new_base = g_strconcat (base, dent1->name, \"/\", NULL);\n                ret = find_deleted_recursive (repo, n1, n2, new_base,\n                                              child, parent, entries);\n                g_free (new_base);\n                seaf_dir_free (n1);\n                seaf_dir_free (n2);\n                if (ret < 0)\n                    return ret;\n            }\n            p1 = p1->next;\n            p2 = p2->next;\n        } else {\n            p1 = p1->next;\n        }\n    }\n\n    for ( ; p2 != NULL; p2 = p2->next) {\n        dent2 = p2->data;\n        add_deleted_entry (repo, entries, dent2, base, child, parent);\n    }\n\n    return ret;\n}\n\nstatic int\nfind_deleted (SeafRepo *repo,\n              SeafCommit *child,\n              
SeafCommit *parent,\n              const char *base,\n              GHashTable *entries)\n{\n    SeafDir *d1, *d2;\n    int ret = 0;\n\n    d1 = seaf_fs_manager_get_seafdir_sorted_by_path (seaf->fs_mgr,\n                                                     repo->store_id,\n                                                     repo->version,\n                                                     child->root_id, base);\n    if (!d1) {\n        return ret;\n    }\n\n    d2 = seaf_fs_manager_get_seafdir_sorted_by_path (seaf->fs_mgr,\n                                                     repo->store_id,\n                                                     repo->version,\n                                                     parent->root_id, base);\n    if (!d2) {\n        seaf_dir_free (d1);\n        return ret;\n    }\n\n    ret = find_deleted_recursive (repo, d1, d2, base, child, parent, entries);\n\n    seaf_dir_free (d2);\n    seaf_dir_free (d1);\n\n    return ret;\n}\n\ntypedef struct CollectDelData {\n    SeafRepo *repo;\n    GHashTable *entries;\n    gint64 truncate_time;\n    char *path;\n} CollectDelData;\n\n#define DEFAULT_RECYCLE_DAYS 7\n\nstatic gboolean\ncollect_deleted (SeafCommit *commit, void *vdata, gboolean *stop)\n{\n    CollectDelData *data = vdata;\n    SeafRepo *repo = data->repo;\n    GHashTable *entries = data->entries;\n    gint64 truncate_time = data->truncate_time;\n    SeafCommit *p1, *p2;\n\n    /* We use <= here. This is for handling clean trash and history.\n     * If the user cleans all history, truncate time will be equal to\n     * the head commit's ctime. 
In such case, we don't actually want to display\n     * any deleted file.\n     */\n    if ((gint64)(commit->ctime) <= truncate_time) {\n        *stop = TRUE;\n        return TRUE;\n    }\n\n    if (commit->parent_id == NULL)\n        return TRUE;\n\n    if (!(strstr (commit->desc, PREFIX_DEL_FILE) != NULL ||\n          strstr (commit->desc, PREFIX_DEL_DIR) != NULL ||\n          strstr (commit->desc, PREFIX_DEL_DIRS) != NULL)) {\n        return TRUE;\n    }\n\n    p1 = seaf_commit_manager_get_commit (commit->manager,\n                                         repo->id, repo->version,\n                                         commit->parent_id);\n    if (!p1) {\n        seaf_warning (\"Failed to find commit %s:%s.\\n\", repo->id, commit->parent_id);\n        return FALSE;\n    }\n\n    if (find_deleted (data->repo, commit, p1, data->path, entries) < 0) {\n        seaf_commit_unref (p1);\n        return FALSE;\n    }\n\n    seaf_commit_unref (p1);\n\n    if (commit->second_parent_id) {\n        p2 = seaf_commit_manager_get_commit (commit->manager,\n                                             repo->id, repo->version,\n                                             commit->second_parent_id);\n        if (!p2) {\n            seaf_warning (\"Failed to find commit %s:%s.\\n\",\n                          repo->id, commit->second_parent_id);\n            return FALSE;\n        }\n\n        if (find_deleted (data->repo, commit, p2, data->path, entries) < 0) {\n            seaf_commit_unref (p2);\n            return FALSE;\n        }\n\n        seaf_commit_unref (p2);\n    }\n\n    return TRUE;\n}\n\ntypedef struct RemoveExistingParam {\n    SeafRepo *repo;\n    SeafCommit *head;\n} RemoveExistingParam;\n\nstatic gboolean\nremove_existing (gpointer key, gpointer value, gpointer user_data)\n{\n    SeafileDeletedEntry *e = value;\n    RemoveExistingParam *param = user_data;\n    SeafRepo *repo = param->repo;\n    SeafCommit *head = param->head;\n    guint32 mode = 
seafile_deleted_entry_get_mode(e), mode_out = 0;\n    char *path = key;\n\n    char *obj_id = seaf_fs_manager_path_to_obj_id (seaf->fs_mgr,\n                                                   repo->store_id, repo->version,\n                                                   head->root_id,\n                                                   path, &mode_out, NULL);\n    if (obj_id == NULL)\n        return FALSE;\n    g_free (obj_id);\n\n    /* If path exist in head commit and with the same type,\n     * remove it from deleted entries.\n     */\n    if ((mode & S_IFMT) == (mode_out & S_IFMT)) {\n        /* g_debug (\"%s exists in head commit.\\n\", path); */\n        return TRUE;\n    }\n\n    return FALSE;\n}\n\nstatic int\nfilter_out_existing_entries (GHashTable *entries,\n                             SeafRepo *repo,\n                             const char *head_id)\n{\n    SeafCommit *head;\n    RemoveExistingParam param;\n\n    head = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                                           repo->id, repo->version, \n                                           head_id);\n    if (!head) {\n        seaf_warning (\"Failed to find head commit %s of repo %s.\\n\",\n                      head_id, repo->id);\n        return -1;\n    }\n\n    param.repo = repo;\n    param.head = head;\n\n    g_hash_table_foreach_remove (entries, remove_existing, &param);\n\n    seaf_commit_unref (head);\n    return 0;\n}\n\nstatic gboolean\nhash_to_list (gpointer key, gpointer value, gpointer user_data)\n{\n    GList **plist = (GList **)user_data;\n\n    g_free (key);\n    *plist = g_list_prepend (*plist, value);\n\n    return TRUE;\n}\n\nstatic gint\ncompare_commit_by_time_ex (gconstpointer a, gconstpointer b)\n{\n    const SeafCommit *commit_a = a;\n    const SeafCommit *commit_b = b;\n\n    /* Latest commit comes first in the list. 
*/\n    return (commit_b->ctime - commit_a->ctime);\n}\n\nstatic gint\ncompare_commit_by_time (gconstpointer a, gconstpointer b, gpointer unused)\n{\n    return compare_commit_by_time_ex(a, b);\n}\n\nstatic int\ninsert_parent_commit (GList **list, GHashTable *hash,\n                      const char *repo_id, int version,\n                      const char *parent_id)\n{\n    SeafCommit *p;\n    char *key;\n\n    if (g_hash_table_lookup (hash, parent_id) != NULL)\n        return 0;\n\n    p = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                                        repo_id, version,\n                                        parent_id);\n    if (!p) {\n        seaf_warning (\"Failed to find commit %s\\n\", parent_id);\n        return -1;\n    }\n\n    *list = g_list_insert_sorted_with_data (*list, p,\n                                           compare_commit_by_time,\n                                           NULL);\n\n    key = g_strdup (parent_id);\n    g_hash_table_replace (hash, key, key);\n\n    return 0;\n}\n\nstatic GList *\nscan_stat_to_list(const char *scan_stat, GHashTable *commit_hash, SeafRepo *repo)\n{\n    json_t *commit_array = NULL, *commit_obj = NULL;\n    char *commit_id = NULL;\n    SeafCommit *commit = NULL;\n    GList *list = NULL;\n    char *key;\n    commit_array = json_loadb (scan_stat, strlen(scan_stat), 0, NULL);\n    if (!commit_array) {\n        return NULL;\n    }\n    int i;\n    for (i = 0; i < json_array_size (commit_array); i++) {\n        commit_obj = json_array_get (commit_array, i);\n        commit_id = json_string_value (commit_obj);\n        if (commit_id && strlen(commit_id) == 40) {\n            commit = seaf_commit_manager_get_commit (seaf->commit_mgr, repo->id,\n                                                    repo->version, commit_id);\n            if (!commit) {\n                return NULL;\n            }\n            list = g_list_prepend (list, commit);\n            key = g_strdup 
(commit->commit_id);\n            g_hash_table_replace (commit_hash, key, key);\n        }\n    }\n    json_decref (commit_array);\n    list = g_list_sort (list, compare_commit_by_time_ex);\n    return list;\n}\n\nstatic int\nscan_commits_for_collect_deleted (CollectDelData *data,\n                                  const char *prev_scan_stat,\n                                  int limit,\n                                  char **next_scan_stat)\n{\n    GList *list = NULL;\n    SeafCommit *commit;\n    GHashTable *commit_hash;\n    SeafRepo *repo = data->repo;\n    int scan_num = 0;\n    gboolean ret = TRUE;\n\n    /* A hash table for recording id of traversed commits. */\n    commit_hash = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL);\n\n    if (prev_scan_stat == NULL) {\n        commit = seaf_commit_manager_get_commit (seaf->commit_mgr, repo->id,\n                                                 repo->version, repo->head->commit_id);\n        if (!commit) {\n            ret = FALSE;\n            goto out;\n        }\n        list = g_list_prepend (list, commit);\n        char *key = g_strdup (commit->commit_id);\n        g_hash_table_replace (commit_hash, key, key);\n    } else {\n        list = scan_stat_to_list (prev_scan_stat, commit_hash, repo);\n        if (list == NULL) {\n            ret = FALSE;\n            goto out;\n        }\n    }\n\n    while (list) {\n        gboolean stop = FALSE;\n        commit = list->data;\n        list = g_list_delete_link (list, list);\n\n        if (!collect_deleted (commit, data, &stop)) {\n            seaf_warning(\"[comit-mgr] CommitTraverseFunc failed\\n\");\n            seaf_commit_unref (commit);\n            ret = FALSE;\n            goto out;\n        }\n\n        if (stop) {\n            seaf_commit_unref (commit);\n            /* stop traverse down from this commit,\n             * but not stop traversing the tree\n             */\n            continue;\n        }\n\n        if (commit->parent_id) 
{\n            if (insert_parent_commit (&list, commit_hash, repo->id,\n                                      repo->version,\n                                      commit->parent_id) < 0) {\n                seaf_warning(\"[comit-mgr] insert parent commit failed\\n\");\n                seaf_commit_unref (commit);\n                ret = FALSE;\n                goto out;\n            }\n        }\n        if (commit->second_parent_id) {\n            if (insert_parent_commit (&list, commit_hash, repo->id,\n                                      repo->version,\n                                      commit->second_parent_id) < 0) {\n                seaf_warning(\"[comit-mgr]insert second parent commit failed\\n\");\n                seaf_commit_unref (commit);\n                ret = FALSE;\n                goto out;\n            }\n        }\n        seaf_commit_unref (commit);\n\n        if (++scan_num >= limit) {\n            break;\n        }\n    }\n\n    json_t *commit_array = json_array ();\n    while (list) {\n        commit = list->data;\n        json_array_append_new (commit_array, json_string (commit->commit_id));\n        seaf_commit_unref (commit);\n        list = g_list_delete_link (list, list);\n    }\n    if (json_array_size(commit_array) > 0) {\n        char *commits = json_dumps (commit_array, JSON_COMPACT);\n        *next_scan_stat = commits;\n    }\n    json_decref (commit_array);\n    g_hash_table_destroy (commit_hash);\n\n    return ret;\n\nout:\n    g_hash_table_destroy (commit_hash);\n    while (list) {\n        commit = list->data;\n        seaf_commit_unref (commit);\n        list = g_list_delete_link (list, list);\n    }\n\n    return ret;\n}\n\nGList *\nseaf_repo_manager_get_deleted_entries (SeafRepoManager *mgr,\n                                       const char *repo_id,\n                                       int show_days,\n                                       const char *path,\n                                       const char 
*scan_stat,\n                                       int limit,\n                                       GError **error)\n{\n    SeafRepo *repo;\n    gint64 truncate_time, show_time;\n    GList *ret = NULL;\n    char *next_scan_stat = NULL;\n\n    truncate_time = seaf_repo_manager_get_repo_truncate_time (mgr, repo_id);\n    if (truncate_time == 0) {\n        // Don't keep history, set scan_stat as NULL, indicate no need for next scan\n        ret = g_list_append (ret, g_object_new (SEAFILE_TYPE_DELETED_ENTRY,\n                                                \"scan_stat\", NULL,\n                                                NULL));\n        return ret;\n    }\n\n    if (show_days <= 0)\n        show_time = -1;\n    else\n        show_time = (gint64)time(NULL) - show_days * 24 * 3600;\n\n    repo = seaf_repo_manager_get_repo (mgr, repo_id);\n    if (!repo) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Invalid repo id\");\n        return NULL;\n    }\n\n    CollectDelData data = {0};\n    GHashTable *entries = g_hash_table_new_full (g_str_hash, g_str_equal,\n                                                 g_free, g_object_unref);\n    data.repo = repo;\n    data.entries = entries;\n    data.truncate_time = MAX (show_time, truncate_time);\n    if (path) {\n        if (path[strlen(path) - 1] == '/') {\n            data.path = g_strdup (path);\n        } else {\n            data.path = g_strconcat (path, \"/\", NULL);\n        }\n    } else {\n        data.path = g_strdup (\"/\");\n    }\n\n    if (!scan_commits_for_collect_deleted (&data, scan_stat, limit, &next_scan_stat)) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_INTERNAL,\n                     \"Internal error\");\n        g_hash_table_destroy (entries);\n        seaf_repo_unref (repo);\n        g_free (data.path);\n        g_free (next_scan_stat);\n        return NULL;\n    }\n\n    /* Remove entries exist in the current commit.\n     * This is necessary 
because some files may be added back after deletion.\n     */\n    if (filter_out_existing_entries (entries, repo,\n                                     repo->head->commit_id) == 0) {\n        // filter success, then add collected result to list\n        g_hash_table_foreach_steal (entries, hash_to_list, &ret);\n    }\n\n    // Append scan_stat entry to the end to indicate the end of scan result\n    ret = g_list_append (ret, g_object_new (SEAFILE_TYPE_DELETED_ENTRY,\n                                            \"scan_stat\", next_scan_stat,\n                                            NULL));\n\n    g_hash_table_destroy (entries);\n\n    seaf_repo_unref (repo);\n    g_free (data.path);\n    g_free (next_scan_stat);\n\n    return ret;\n}\n\nstatic SeafCommit *\nget_commit(SeafRepo *repo, const char *branch_or_commit)\n{\n    SeafBranch *b;\n    SeafCommit *c;\n\n    b = seaf_branch_manager_get_branch (seaf->branch_mgr, repo->id,\n                                        branch_or_commit);\n    if (!b) {\n        if (strcmp(branch_or_commit, \"HEAD\") == 0)\n            c = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                                                repo->id, repo->version, \n                                                repo->head->commit_id);\n        else\n            c = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                                                repo->id, repo->version, \n                                                branch_or_commit);\n    } else {\n        c = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                                            repo->id, repo->version, \n                                            b->commit_id);\n    }\n\n    if (b)\n        seaf_branch_unref (b);\n    \n    return c;\n}\n\nGList *\nseaf_repo_diff (SeafRepo *repo, const char *old, const char *new, int fold_dir_results, char **error)\n{\n    SeafCommit *c1 = NULL, *c2 = NULL;\n    int ret = 0;\n    GList *diff_entries = 
NULL;\n\n    g_return_val_if_fail (*error == NULL, NULL);\n\n    c2 = get_commit (repo, new);\n    if (!c2) {\n        *error = g_strdup(\"Can't find new commit\");\n        return NULL;\n    }\n    \n    if (old == NULL || old[0] == '\\0') {\n        if (c2->parent_id && c2->second_parent_id) {\n            ret = diff_merge (c2, &diff_entries, fold_dir_results);\n            seaf_commit_unref (c2);\n            if (ret < 0) {\n                *error = g_strdup(\"Failed to do diff\");\n                g_list_free_full (diff_entries, (GDestroyNotify)diff_entry_free);\n                return NULL;\n            }\n            return diff_entries;\n        }\n\n        if (!c2->parent_id) {\n            seaf_commit_unref (c2);\n            return NULL;\n        }\n        c1 = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                                             repo->id, repo->version, \n                                             c2->parent_id);\n    } else {\n        c1 = get_commit (repo, old);\n    }\n\n    if (!c1) {\n        *error = g_strdup(\"Can't find old commit\");\n        seaf_commit_unref (c2);\n        return NULL;\n    }\n\n    /* do diff */\n    ret = diff_commits (c1, c2, &diff_entries, fold_dir_results);\n    if (ret < 0) {\n        g_list_free_full (diff_entries, (GDestroyNotify)diff_entry_free);\n        diff_entries = NULL;\n        *error = g_strdup(\"Failed to do diff\");\n    }\n\n    seaf_commit_unref (c1);\n    seaf_commit_unref (c2);\n\n    return diff_entries;\n}\n\n"
  },
  {
    "path": "server/repo-perm.c",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#include \"common.h\"\n\n#include \"utils.h\"\n#include \"log.h\"\n\n#include \"seafile-session.h\"\n#include \"repo-mgr.h\"\n\n#include \"seafile-error.h\"\n#include \"seaf-utils.h\"\n/*\n * Permission priority: owner --> personal share --> group share --> public.\n * Permission with higher priority overwrites those with lower priority.\n */\n\nstatic gboolean\ncheck_repo_share_perm_cb (SeafDBRow *row, void *data)\n{\n    char **orig_perm = data;\n    char *perm = g_strdup (seaf_db_row_get_column_text (row, 0));\n\n    if (g_strcmp0(perm, \"rw\") == 0) {\n        g_free (*orig_perm);\n        *orig_perm = perm;\n        return FALSE;\n    } else if (g_strcmp0(perm, \"r\") == 0 && !(*orig_perm)) {\n        *orig_perm = perm;\n        return TRUE;\n    }\n\n    g_free (perm);\n    return TRUE;\n}\n\nstatic char *\ncheck_group_permission_by_user (SeafRepoManager *mgr,\n                                const char *repo_id,\n                                const char *user_name)\n{\n    char *permission = NULL;\n    GList *groups = NULL, *p1;\n    CcnetGroup *group;\n    int group_id;\n    GString *sql;\n\n    /* Get the groups this user belongs to. */\n    groups = ccnet_group_manager_get_groups_by_user (seaf->group_mgr, user_name,\n                                                     1, NULL);\n    if (!groups) {\n        goto out;\n    }\n\n    sql = g_string_new (\"\");\n    g_string_printf (sql, \"SELECT permission FROM RepoGroup WHERE repo_id = ? 
AND group_id IN (\");\n    for (p1 = groups; p1 != NULL; p1 = p1->next) {\n        group = p1->data;\n        g_object_get (group, \"id\", &group_id, NULL);\n\n        g_string_append_printf (sql, \"%d\", group_id);\n        if (p1->next)\n            g_string_append_printf (sql, \",\");\n    }\n    g_string_append_printf (sql, \")\");\n\n    if (seaf_db_statement_foreach_row (mgr->seaf->db, sql->str,\n                                       check_repo_share_perm_cb, &permission,\n                                       1, \"string\", repo_id) < 0) {\n        seaf_warning (\"DB error when get repo share permission for repo %s.\\n\", repo_id);\n    }\n\n    g_string_free (sql, TRUE);\n\nout:\n    for (p1 = groups; p1 != NULL; p1 = p1->next)\n        g_object_unref ((GObject *)p1->data);\n    g_list_free (groups);\n    return permission;\n}\n\nstatic char *\ncheck_repo_share_permission (SeafRepoManager *mgr,\n                             const char *repo_id,\n                             const char *user_name)\n{\n    char *permission;\n\n    permission = seaf_share_manager_check_permission (seaf->share_mgr,\n                                                      repo_id,\n                                                      user_name);\n    if (permission != NULL)\n        return permission;\n\n    permission = check_group_permission_by_user (mgr, repo_id, user_name);\n    if (permission != NULL)\n        return permission;\n\n    if (!mgr->seaf->cloud_mode)\n        return seaf_repo_manager_get_inner_pub_repo_perm (mgr, repo_id);\n\n    return NULL;\n}\n\n// get dir perm from all dir perms in parent repo\n// such as path /a/b, then check /a/b, /a in parent\nstatic char *\nget_dir_perm (GHashTable *perms, const char *path)\n{\n    char *tmp = g_strdup (path);\n    char *slash;\n    char *perm = NULL;\n\n    while (g_strcmp0 (tmp, \"\") != 0) {\n        perm = g_hash_table_lookup (perms, tmp);\n        if (perm)\n            break;\n        slash = g_strrstr (tmp, 
\"/\");\n        *slash = '\\0';\n    }\n\n    g_free (tmp);\n\n    return g_strdup (perm);\n}\n\nstatic char *\ncheck_perm_on_parent_repo (const char *origin_repo_id,\n                           const char *user,\n                           const char *vpath)\n{\n    GHashTable *user_perms = NULL;\n    GHashTable *group_perms = NULL;\n    GList *groups = NULL;\n    GList *iter;\n    char *perm = NULL;\n\n    user_perms = seaf_share_manager_get_shared_dirs_to_user (seaf->share_mgr,\n                                                             origin_repo_id,\n                                                             user);\n\n    if (!user_perms) {\n        return NULL;\n    }\n\n    if (g_hash_table_size (user_perms) > 0) {\n        perm = get_dir_perm (user_perms, vpath);\n        if (perm) {\n            g_hash_table_destroy (user_perms);\n            return perm;\n        }\n    }\n    g_hash_table_destroy (user_perms);\n\n    groups = ccnet_group_manager_get_groups_by_user (seaf->group_mgr, user,\n                                                     1, NULL);\n    if (!groups) {\n        return NULL;\n    }\n\n    group_perms = seaf_share_manager_get_shared_dirs_to_group (seaf->share_mgr,\n                                                               origin_repo_id,\n                                                               groups);\n\n    for (iter = groups; iter; iter = iter->next)\n        g_object_unref ((GObject *)iter->data);\n    g_list_free (groups);\n\n    if (!group_perms) {\n        return NULL;\n    }\n    if (g_hash_table_size (group_perms) > 0) {\n        perm = get_dir_perm (group_perms, vpath);\n    }\n    g_hash_table_destroy (group_perms);\n\n    return perm;\n}\n\nstatic char *\ncheck_virtual_repo_permission (SeafRepoManager *mgr,\n                               const char *repo_id,\n                               const char *origin_repo_id,\n                               const char *user,\n                               const char 
*vpath)\n{\n    char *owner = NULL;\n    char *permission = NULL;\n\n    /* If I'm the owner of origin repo, I have full access to sub-repos. */\n    owner = seaf_repo_manager_get_repo_owner (mgr, origin_repo_id);\n    if (g_strcmp0 (user, owner) == 0) {\n        g_free (owner);\n        permission = g_strdup(\"rw\");\n        return permission;\n    }\n    g_free (owner);\n\n    /* If I'm not the owner of origin repo, this sub-repo can be created\n     * from a shared repo by me or directly shared by others to me.\n     * The priority of shared sub-folder is higher than top-level repo.\n     */\n    permission = check_perm_on_parent_repo (origin_repo_id,\n                                            user, vpath);\n    if (permission) {\n        return permission;\n    }\n\n    permission = check_repo_share_permission (mgr, origin_repo_id, user);\n\n    return permission;\n}\n\n/*\n * Comprehensive repo access permission checker.\n *\n * Returns read/write permission.\n */\nchar *\nseaf_repo_manager_check_permission (SeafRepoManager *mgr,\n                                    const char *repo_id,\n                                    const char *user,\n                                    GError **error)\n{\n    SeafVirtRepo *vinfo;\n    char *owner = NULL;\n    char *permission = NULL;\n\n    /* This is a virtual repo.*/\n    vinfo = seaf_repo_manager_get_virtual_repo_info (mgr, repo_id);\n    if (vinfo) {\n        permission = check_virtual_repo_permission (mgr, repo_id,\n                                                    vinfo->origin_repo_id,\n                                                    user, vinfo->path);\n        goto out;\n    }\n\n    owner = seaf_repo_manager_get_repo_owner (mgr, repo_id);\n    if (owner != NULL) {\n        if (strcmp (owner, user) == 0)\n            permission = g_strdup(\"rw\");\n        else\n            permission = check_repo_share_permission (mgr, repo_id, user);\n    }\n\nout:\n    seaf_virtual_repo_info_free (vinfo);\n    
g_free (owner);\n    return permission;\n}\n\n/*\n * Directories are always before files. Otherwise compare the names.\n */\nstatic gint\ncomp_dirent_func (gconstpointer a, gconstpointer b)\n{\n    const SeafDirent *dent_a = a, *dent_b = b;\n\n    if (S_ISDIR(dent_a->mode) && S_ISREG(dent_b->mode))\n        return -1;\n\n    if (S_ISREG(dent_a->mode) && S_ISDIR(dent_b->mode))\n        return 1;\n\n    return strcasecmp (dent_a->name, dent_b->name);\n}\n\nGList *\nseaf_repo_manager_list_dir_with_perm (SeafRepoManager *mgr,\n                                      const char *repo_id,\n                                      const char *dir_path,\n                                      const char *dir_id,\n                                      const char *user,\n                                      int offset,\n                                      int limit,\n                                      GError **error)\n{\n    SeafRepo *repo;\n    char *perm = NULL;\n    SeafDir *dir;\n    SeafDirent *dent;\n    SeafileDirent *d;\n    GList *res = NULL;\n    GList *p;\n\n    perm = seaf_repo_manager_check_permission (mgr, repo_id, user, error);\n    if (!perm) {\n        if (*error == NULL)\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Access denied\");\n        return NULL;\n    }\n\n    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);\n    if (!repo) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Bad repo id\");\n        g_free (perm);\n        return NULL;\n    }\n\n    dir = seaf_fs_manager_get_seafdir (seaf->fs_mgr,\n                                       repo->store_id, repo->version, dir_id);\n    if (!dir) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_DIR_ID, \"Bad dir id\");\n        seaf_repo_unref (repo);\n        g_free (perm);\n        return NULL;\n    }\n\n    dir->entries = g_list_sort (dir->entries, comp_dirent_func);\n\n    if (offset < 0) {\n        offset = 0;\n    }\n\n    int index = 
0;\n    gboolean is_shared;\n    char *cur_path;\n    GHashTable *shared_sub_dirs = NULL;\n\n    if (!repo->virtual_info) {\n        char *repo_owner = seaf_repo_manager_get_repo_owner (seaf->repo_mgr, repo_id);\n        if (repo_owner && strcmp (user, repo_owner) == 0) {\n            shared_sub_dirs = seaf_share_manager_get_shared_sub_dirs (seaf->share_mgr,\n                                                                      repo->store_id,\n                                                                      dir_path);\n        }\n        g_free (repo_owner);\n    }\n\n    for (p = dir->entries; p != NULL; p = p->next, index++) {\n        if (index < offset) {\n            continue;\n        }\n\n        if (limit > 0) {\n            if (index >= offset + limit)\n                break;\n        }\n\n        dent = p->data;\n\n        if (!is_object_id_valid (dent->id))\n            continue;\n\n        d = g_object_new (SEAFILE_TYPE_DIRENT,\n                          \"obj_id\", dent->id,\n                          \"obj_name\", dent->name,\n                          \"mode\", dent->mode,\n                          \"version\", dent->version,\n                          \"mtime\", dent->mtime,\n                          \"size\", dent->size,\n                          \"permission\", perm,\n                          \"modifier\", dent->modifier,\n                          NULL);\n\n        if (shared_sub_dirs && S_ISDIR(dent->mode)) {\n            if (strcmp (dir_path, \"/\") == 0) {\n                cur_path = g_strconcat (dir_path, dent->name, NULL);\n            } else {\n                cur_path = g_strconcat (dir_path, \"/\", dent->name, NULL);\n            }\n            is_shared = g_hash_table_lookup (shared_sub_dirs, cur_path) ? 
TRUE : FALSE;\n            g_free (cur_path);\n            g_object_set (d, \"is_shared\", is_shared, NULL);\n        }\n        res = g_list_prepend (res, d);\n    }\n\n    if (shared_sub_dirs)\n        g_hash_table_destroy (shared_sub_dirs);\n    seaf_dir_free (dir);\n    seaf_repo_unref (repo);\n    g_free (perm);\n    if (res)\n        res = g_list_reverse (res);\n\n    return res;\n}\n"
  },
  {
    "path": "server/seaf-server.c",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#include \"common.h\"\n\n#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)\n#include <event2/event.h>\n#else\n#include <event.h>\n#endif\n\n#include <unistd.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <errno.h>\n#include <getopt.h>\n#include <signal.h>\n\n#include <glib.h>\n#include <glib-object.h>\n\n#include <searpc-server.h>\n#include <searpc-client.h>\n\n#include \"seafile-session.h\"\n#include \"seafile-rpc.h\"\n#include \"log.h\"\n#include \"utils.h\"\n\n#include \"cdc/cdc.h\"\n\nSeafileSession *seaf;\n\nchar *pidfile = NULL;\n\nstatic const char *short_options = \"hvc:d:l:fP:D:F:p:tr:\";\nstatic struct option long_options[] = {\n    { \"help\", no_argument, NULL, 'h', },\n    { \"version\", no_argument, NULL, 'v', },\n    { \"config-file\", required_argument, NULL, 'c' },\n    { \"central-config-dir\", required_argument, NULL, 'F' },\n    { \"seafdir\", required_argument, NULL, 'd' },\n    { \"log\", required_argument, NULL, 'l' },\n    { \"debug\", required_argument, NULL, 'D' },\n    { \"foreground\", no_argument, NULL, 'f' },\n    { \"pidfile\", required_argument, NULL, 'P' },\n    { \"rpc-pipe-path\", required_argument, NULL, 'p' },\n    { \"test-config\", no_argument, NULL, 't' },\n    { \"repair-repo\", required_argument, NULL, 'r' },\n    { NULL, 0, NULL, 0, },\n};\n\nstatic void usage ()\n{\n    fprintf (stderr, \"usage: seaf-server [-c config_dir] [-d seafile_dir]\\n\");\n}\n\n#include <searpc.h>\n#include \"searpc-signature.h\"\n#include \"searpc-marshal.h\"\n#include <searpc-named-pipe-transport.h>\n\n#define SEAFILE_RPC_PIPE_NAME \"seafile.sock\"\n\n#define NAMED_PIPE_SERVER_THREAD_POOL_SIZE 50\n\nstatic void start_rpc_service (const char *seafile_dir,\n                               const char *rpc_pipe_path)\n{\n    SearpcNamedPipeServer *rpc_server = NULL;\n    char *pipe_path = NULL;\n\n    searpc_server_init 
(register_marshals);\n\n    searpc_create_service (\"seafserv-threaded-rpcserver\");\n\n    /* threaded services */\n\n    /* repo manipulation */\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_repos_by_id_prefix,\n                                     \"seafile_get_repos_by_id_prefix\",\n                                     searpc_signature_objlist__string_int_int());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_repo,\n                                     \"seafile_get_repo\",\n                                     searpc_signature_object__string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_destroy_repo,\n                                     \"seafile_destroy_repo\",\n                                     searpc_signature_int__string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_repo_list,\n                                     \"seafile_get_repo_list\",\n                                     searpc_signature_objlist__int_int_string_int());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_count_repos,\n                                     \"seafile_count_repos\",\n                                     searpc_signature_int64__void());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_set_repo_owner,\n                                     \"seafile_set_repo_owner\",\n                                     searpc_signature_int__string_string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_repo_owner,\n                                     \"seafile_get_repo_owner\",\n 
                                    searpc_signature_string__string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_orphan_repo_list,\n                                     \"seafile_get_orphan_repo_list\",\n                                     searpc_signature_objlist__void());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_edit_repo,\n                                     \"seafile_edit_repo\",\n                                     searpc_signature_int__string_string_string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_change_repo_passwd,\n                                     \"seafile_change_repo_passwd\",\n                                     searpc_signature_int__string_string_string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_upgrade_repo_pwd_hash_algorithm,\n                                     \"seafile_upgrade_repo_pwd_hash_algorithm\",\n                                     searpc_signature_int__string_string_string_string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_is_repo_owner,\n                                     \"seafile_is_repo_owner\",\n                                     searpc_signature_int__string_string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_list_owned_repos,\n                                     \"seafile_list_owned_repos\",\n                                     searpc_signature_objlist__string_int_int_int());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_search_repos_by_name,\n                   
                  \"seafile_search_repos_by_name\",\n                                     searpc_signature_objlist__string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_server_repo_size,\n                                     \"seafile_server_repo_size\",\n                                     searpc_signature_int64__string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_repo_set_access_property,\n                                     \"seafile_repo_set_access_property\",\n                                     searpc_signature_int__string_string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_repo_query_access_property,\n                                     \"seafile_repo_query_access_property\",\n                                     searpc_signature_string__string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_revert_on_server,\n                                     \"seafile_revert_on_server\",\n                                     searpc_signature_int__string_string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_diff,\n                                     \"seafile_diff\",\n                                     searpc_signature_objlist__string_string_string_int());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_post_file,\n                                     \"seafile_post_file\",\n                    searpc_signature_int__string_string_string_string_string());\n\n    /* searpc_server_register_function (\"seafserv-threaded-rpcserver\", */\n    /*                                  seafile_post_file_blocks, */\n    /*      
                            \"seafile_post_file_blocks\", */\n    /*                 searpc_signature_string__string_string_string_string_string_string_int64_int()); */\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_post_multi_files,\n                                     \"seafile_post_multi_files\",\n                    searpc_signature_string__string_string_string_string_string_int());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_put_file,\n                                     \"seafile_put_file\",\n                    searpc_signature_string__string_string_string_string_string_string());\n    /* searpc_server_register_function (\"seafserv-threaded-rpcserver\", */\n    /*                                  seafile_put_file_blocks, */\n    /*                                  \"seafile_put_file_blocks\", */\n    /*                 searpc_signature_string__string_string_string_string_string_string_string_int64()); */\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_post_empty_file,\n                                     \"seafile_post_empty_file\",\n                        searpc_signature_int__string_string_string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_post_dir,\n                                     \"seafile_post_dir\",\n                        searpc_signature_int__string_string_string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_mkdir_with_parents,\n                                     \"seafile_mkdir_with_parents\",\n                        searpc_signature_int__string_string_string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                
                     seafile_del_file,\n                                     \"seafile_del_file\",\n                        searpc_signature_int__string_string_string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_batch_del_files,\n                                     \"seafile_batch_del_files\",\n                        searpc_signature_int__string_string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_copy_file,\n                                     \"seafile_copy_file\",\n       searpc_signature_object__string_string_string_string_string_string_string_int_int());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_move_file,\n                                     \"seafile_move_file\",\n       searpc_signature_object__string_string_string_string_string_string_int_string_int_int());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_rename_file,\n                                     \"seafile_rename_file\",\n                    searpc_signature_int__string_string_string_string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_is_valid_filename,\n                                     \"seafile_is_valid_filename\",\n                                     searpc_signature_int__string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_create_repo,\n                                     \"seafile_create_repo\",\n                                     searpc_signature_string__string_string_string_string_int_string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                             
        seafile_create_enc_repo,\n                                     \"seafile_create_enc_repo\",\n                                     searpc_signature_string__string_string_string_string_string_string_string_int_string_string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_commit,\n                                     \"seafile_get_commit\",\n                                     searpc_signature_object__string_int_string());\n    \n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_list_dir,\n                                     \"seafile_list_dir\",\n                                     searpc_signature_objlist__string_string_int_int());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_list_dir_with_perm,\n                                     \"list_dir_with_perm\",\n                                     searpc_signature_objlist__string_string_string_string_int_int());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_list_file_blocks,\n                                     \"seafile_list_file_blocks\",\n                                     searpc_signature_string__string_string_int_int());\n    \n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_file_size,\n                                     \"seafile_get_file_size\",\n                                     searpc_signature_int64__string_int_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_dir_size,\n                                     \"seafile_get_dir_size\",\n                                     searpc_signature_int64__string_int_string());\n\n    
searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_list_dir_by_path,\n                                     \"seafile_list_dir_by_path\",\n                                     searpc_signature_objlist__string_string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_dir_id_by_commit_and_path,\n                                     \"seafile_get_dir_id_by_commit_and_path\",\n                                     searpc_signature_string__string_string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_file_id_by_path,\n                                     \"seafile_get_file_id_by_path\",\n                                     searpc_signature_string__string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_dir_id_by_path,\n                                     \"seafile_get_dir_id_by_path\",\n                                     searpc_signature_string__string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_dirent_by_path,\n                                     \"seafile_get_dirent_by_path\",\n                                     searpc_signature_object__string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_list_file_revisions,\n                                     \"seafile_list_file_revisions\",\n                                     searpc_signature_objlist__string_string_string_int());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_calc_files_last_modified,\n                                     
\"seafile_calc_files_last_modified\",\n                                     searpc_signature_objlist__string_string_int());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_revert_file,\n                                     \"seafile_revert_file\",\n                                     searpc_signature_int__string_string_string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_revert_dir,\n                                     \"seafile_revert_dir\",\n                                     searpc_signature_int__string_string_string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_check_repo_blocks_missing,\n                                     \"seafile_check_repo_blocks_missing\",\n                                     searpc_signature_string__string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_deleted,\n                                     \"get_deleted\",\n                                     searpc_signature_objlist__string_int_string_string_int());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_total_file_number,\n                                     \"get_total_file_number\",\n                                     searpc_signature_int64__void());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_total_storage,\n                                     \"get_total_storage\",\n                                     searpc_signature_int64__void());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_file_count_info_by_path,\n                   
                  \"get_file_count_info_by_path\",\n                                     searpc_signature_object__string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_trash_repo_owner,\n                                     \"get_trash_repo_owner\",\n                                     searpc_signature_string__string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_convert_repo_path,\n                                     \"convert_repo_path\",\n                                     searpc_signature_string__string_string_string_int());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_search_files,\n                                     \"search_files\",\n                                     searpc_signature_objlist__string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_search_files_by_path,\n                                     \"search_files_by_path\",\n                                     searpc_signature_objlist__string_string_string());\n\n    /* share repo to user */\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_add_share,\n                                     \"seafile_add_share\",\n                                     searpc_signature_int__string_string_string_string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_list_share_repos,\n                                     \"seafile_list_share_repos\",\n                                     searpc_signature_objlist__string_string_int_int());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     
seafile_list_repo_shared_to,\n                                     \"seafile_list_repo_shared_to\",\n                                     searpc_signature_objlist__string_string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_remove_share,\n                                     \"seafile_remove_share\",\n                                     searpc_signature_int__string_string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_set_share_permission,\n                                     \"set_share_permission\",\n                                     searpc_signature_int__string_string_string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_share_subdir_to_user,\n                                     \"share_subdir_to_user\",\n                                     searpc_signature_string__string_string_string_string_string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_unshare_subdir_for_user,\n                                     \"unshare_subdir_for_user\",\n                                     searpc_signature_int__string_string_string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_update_share_subdir_perm_for_user,\n                                     \"update_share_subdir_perm_for_user\",\n                                     searpc_signature_int__string_string_string_string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_shared_repo_by_path,\n                                     \"get_shared_repo_by_path\",\n                                     
searpc_signature_object__string_string_string_int());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_shared_users_by_repo,\n                                     \"get_shared_users_by_repo\",\n                                     searpc_signature_objlist__string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_org_get_shared_users_by_repo,\n                                     \"org_get_shared_users_by_repo\",\n                                     searpc_signature_objlist__int_string());\n\n    /* share repo to group */\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_group_share_repo,\n                                     \"seafile_group_share_repo\",\n                                     searpc_signature_int__string_int_string_string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_group_unshare_repo,\n                                     \"seafile_group_unshare_repo\",\n                                     searpc_signature_int__string_int_string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_shared_groups_by_repo,\n                                     \"seafile_get_shared_groups_by_repo\",\n                                     searpc_signature_string__string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_share_subdir_to_group,\n                                     \"share_subdir_to_group\",\n                                     searpc_signature_string__string_string_string_int_string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     
seafile_unshare_subdir_for_group,\n                                     \"unshare_subdir_for_group\",\n                                     searpc_signature_int__string_string_string_int());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_update_share_subdir_perm_for_group,\n                                     \"update_share_subdir_perm_for_group\",\n                                     searpc_signature_int__string_string_string_int_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_group_repoids,\n                                     \"seafile_get_group_repoids\",\n                                     searpc_signature_string__int());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_list_repo_shared_group,\n                                     \"seafile_list_repo_shared_group\",\n                                     searpc_signature_objlist__string_string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_group_shared_repo_by_path,\n                                     \"get_group_shared_repo_by_path\",\n                                     searpc_signature_object__string_string_int_int());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_group_repos_by_user,\n                                     \"get_group_repos_by_user\",\n                                     searpc_signature_objlist__string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_org_group_repos_by_user,\n                                     \"get_org_group_repos_by_user\",\n                                     searpc_signature_objlist__string_int());\n    
searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_repos_by_group,\n                                     \"seafile_get_repos_by_group\",\n                                     searpc_signature_objlist__int());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_group_repos_by_owner,\n                                     \"get_group_repos_by_owner\",\n                                     searpc_signature_objlist__string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_group_repo_owner,\n                                     \"get_group_repo_owner\",\n                                     searpc_signature_string__string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_remove_repo_group,\n                                     \"seafile_remove_repo_group\",\n                                     searpc_signature_int__int_string());    \n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_set_group_repo_permission,\n                                     \"set_group_repo_permission\",\n                                     searpc_signature_int__int_string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_shared_users_for_subdir,\n                                     \"seafile_get_shared_users_for_subdir\",\n                                     searpc_signature_objlist__string_string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_shared_groups_for_subdir,\n                                     \"seafile_get_shared_groups_for_subdir\",\n                                    
 searpc_signature_objlist__string_string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_repo_has_been_shared,\n                                     \"repo_has_been_shared\",\n                                     searpc_signature_int__string_int());\n\n    /* branch and commit */\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_branch_gets,\n                                     \"seafile_branch_gets\",\n                                     searpc_signature_objlist__string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_commit_list,\n                                     \"seafile_get_commit_list\",\n                                     searpc_signature_objlist__string_int_int());\n\n    /* token */\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_generate_repo_token,\n                                     \"seafile_generate_repo_token\",\n                                     searpc_signature_string__string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_delete_repo_token,\n                                     \"seafile_delete_repo_token\",\n                                     searpc_signature_int__string_string_string());\n    \n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_list_repo_tokens,\n                                     \"seafile_list_repo_tokens\",\n                                     searpc_signature_objlist__string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_list_repo_tokens_by_email,\n                                     
\"seafile_list_repo_tokens_by_email\",\n                                     searpc_signature_objlist__string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_delete_repo_tokens_by_peer_id,\n                                     \"seafile_delete_repo_tokens_by_peer_id\",\n                                     searpc_signature_int__string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_delete_repo_tokens_by_email,\n                                     \"delete_repo_tokens_by_email\",\n                                     searpc_signature_int__string());\n    \n    /* quota */\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_user_quota_usage,\n                                     \"seafile_get_user_quota_usage\",\n                                     searpc_signature_int64__string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_user_share_usage,\n                                     \"seafile_get_user_share_usage\",\n                                     searpc_signature_int64__string());\n\n    /* virtual repo */\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_create_virtual_repo,\n                                     \"create_virtual_repo\",\n                                     searpc_signature_string__string_string_string_string_string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_virtual_repos_by_owner,\n                                     \"get_virtual_repos_by_owner\",\n                                     searpc_signature_objlist__string());\n\n    searpc_server_register_function 
(\"seafserv-threaded-rpcserver\",\n                                     seafile_get_virtual_repo,\n                                     \"get_virtual_repo\",\n                                     searpc_signature_object__string_string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_upload_tmp_file_offset,\n                                     \"seafile_get_upload_tmp_file_offset\",\n                                     searpc_signature_int64__string_string());\n\n    /* Clean trash */\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_clean_up_repo_history,\n                                     \"clean_up_repo_history\",\n                                     searpc_signature_int__string_int());\n\n    /* -------- rpc services -------- */\n    /* token for web access to repo */\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_web_get_access_token,\n                                     \"seafile_web_get_access_token\",\n                                     searpc_signature_string__string_string_string_string_int());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_web_query_access_token,\n                                     \"seafile_web_query_access_token\",\n                                     searpc_signature_object__string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_query_zip_progress,\n                                     \"seafile_query_zip_progress\",\n                                     searpc_signature_string__string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_cancel_zip_task,\n                                     
\"cancel_zip_task\",\n                                     searpc_signature_int__string());\n\n    /* Copy task related. */\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_copy_task,\n                                     \"get_copy_task\",\n                                     searpc_signature_object__string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_cancel_copy_task,\n                                     \"cancel_copy_task\",\n                                     searpc_signature_int__string());\n\n    /* password management */\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_check_passwd,\n                                     \"seafile_check_passwd\",\n                                     searpc_signature_int__string_string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_set_passwd,\n                                     \"seafile_set_passwd\",\n                                     searpc_signature_int__string_string_string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_unset_passwd,\n                                     \"seafile_unset_passwd\",\n                                     searpc_signature_int__string_string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_is_passwd_set,\n                                     \"seafile_is_passwd_set\",\n                                     searpc_signature_int__string_string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_decrypt_key,\n                                     \"seafile_get_decrypt_key\",\n           
                          searpc_signature_object__string_string());\n\n    /* quota management */\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_set_user_quota,\n                                     \"set_user_quota\",\n                                     searpc_signature_int__string_int64());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_user_quota,\n                                     \"get_user_quota\",\n                                     searpc_signature_int64__string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_check_quota,\n                                     \"check_quota\",\n                                     searpc_signature_int__string_int64());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_list_user_quota_usage,\n                                     \"list_user_quota_usage\",\n                                     searpc_signature_objlist__void());\n\n    /* repo permission */\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_check_permission,\n                                     \"check_permission\",\n                                     searpc_signature_string__string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_set_repo_status,\n                                     \"set_repo_status\",\n                                     searpc_signature_int__string_int());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_repo_status,\n                                     \"get_repo_status\",\n                                     
searpc_signature_int__string());\n\n    /* folder permission */\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_check_permission_by_path,\n                                     \"check_permission_by_path\",\n                                     searpc_signature_string__string_string_string());\n    \n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_file_id_by_commit_and_path,\n                                     \"seafile_get_file_id_by_commit_and_path\",\n                                     searpc_signature_string__string_string_string());\n\n    /* event */\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_publish_event,\n                                     \"publish_event\",\n                                     searpc_signature_int__string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_pop_event,\n                                     \"pop_event\",\n                                     searpc_signature_json__string());\n\n                                     \n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_set_inner_pub_repo,\n                                     \"set_inner_pub_repo\",\n                                     searpc_signature_int__string_string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_unset_inner_pub_repo,\n                                     \"unset_inner_pub_repo\",\n                                     searpc_signature_int__string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_is_inner_pub_repo,\n                                     
\"is_inner_pub_repo\",\n                                     searpc_signature_int__string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_list_inner_pub_repos,\n                                     \"list_inner_pub_repos\",\n                                     searpc_signature_objlist__void());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_count_inner_pub_repos,\n                                     \"count_inner_pub_repos\",\n                                     searpc_signature_int64__void());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_list_inner_pub_repos_by_owner,\n                                     \"list_inner_pub_repos_by_owner\",\n                                     searpc_signature_objlist__string());\n\n    /* History */\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_set_repo_history_limit,\n                                     \"set_repo_history_limit\",\n                                     searpc_signature_int__string_int());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_repo_history_limit,\n                                     \"get_repo_history_limit\",\n                                     searpc_signature_int__string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_set_repo_valid_since,\n                                     \"set_repo_valid_since\",\n                                     searpc_signature_int__string_int64());\n\n    /* System default library */\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_system_default_repo_id,\n               
                      \"get_system_default_repo_id\",\n                                     searpc_signature_string__void());\n\n    /* Trashed repos. */\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_trash_repo_list,\n                                     \"get_trash_repo_list\",\n                                     searpc_signature_objlist__int_int());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_del_repo_from_trash,\n                                     \"del_repo_from_trash\",\n                                     searpc_signature_int__string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_restore_repo_from_trash,\n                                     \"restore_repo_from_trash\",\n                                     searpc_signature_int__string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_trash_repos_by_owner,\n                                     \"get_trash_repos_by_owner\",\n                                     searpc_signature_objlist__string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_empty_repo_trash,\n                                     \"empty_repo_trash\",\n                                     searpc_signature_int__void());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_empty_repo_trash_by_owner,\n                                     \"empty_repo_trash_by_owner\",\n                                     searpc_signature_int__string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_generate_magic_and_random_key,\n                                  
   \"generate_magic_and_random_key\",\n                                     searpc_signature_object__int_string_string());\n\n    /* Config */\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_server_config_int,\n                                     \"get_server_config_int\",\n                                     searpc_signature_int__string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_set_server_config_int,\n                                     \"set_server_config_int\",\n                                     searpc_signature_int__string_string_int());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_server_config_int64,\n                                     \"get_server_config_int64\",\n                                     searpc_signature_int64__string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_set_server_config_int64,\n                                     \"set_server_config_int64\",\n                                     searpc_signature_int__string_string_int64());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_get_server_config_string,\n                                     \"get_server_config_string\",\n                                     searpc_signature_string__string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_set_server_config_string,\n                                     \"set_server_config_string\",\n                                     searpc_signature_int__string_string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                 
    seafile_get_server_config_boolean,\n                                     \"get_server_config_boolean\",\n                                     searpc_signature_int__string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     seafile_set_server_config_boolean,\n                                     \"set_server_config_boolean\",\n                                     searpc_signature_int__string_string_int());\n\n    /*user management*/\n   searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_add_emailuser,\n                                     \"add_emailuser\",\n                                     searpc_signature_int__string_string_int_int());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_remove_emailuser,\n                                     \"remove_emailuser\",\n                                     searpc_signature_int__string_string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_validate_emailuser,\n                                     \"validate_emailuser\",\n                                     searpc_signature_int__string_string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_get_emailuser,\n                                     \"get_emailuser\",\n                                     searpc_signature_object__string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_get_emailuser_with_import,\n                                     \"get_emailuser_with_import\",\n                                     searpc_signature_object__string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                  
   ccnet_rpc_get_emailuser_by_id,\n                                     \"get_emailuser_by_id\",\n                                     searpc_signature_object__int());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_get_emailusers,\n                                     \"get_emailusers\",\n                                     searpc_signature_objlist__string_int_int_string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_search_emailusers,\n                                     \"search_emailusers\",\n                                     searpc_signature_objlist__string_string_int_int());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_count_emailusers,\n                                     \"count_emailusers\",\n                                     searpc_signature_int64__string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_count_inactive_emailusers,\n                                     \"count_inactive_emailusers\",\n                                     searpc_signature_int64__string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_update_emailuser,\n                                     \"update_emailuser\",\n                                     searpc_signature_int__string_int_string_int_int());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_update_role_emailuser,\n                                     \"update_role_emailuser\",\n                                     searpc_signature_int__string_string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     
ccnet_rpc_get_superusers,\n                                     \"get_superusers\",\n                                     searpc_signature_objlist__void());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_get_emailusers_in_list,\n                                     \"get_emailusers_in_list\",\n                                     searpc_signature_objlist__string_string());\n\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_update_emailuser_id,\n                                     \"update_emailuser_id\",\n                                     searpc_signature_int__string_string());\n\n    /*group management*/\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_create_group,\n                                     \"create_group\",\n                                     searpc_signature_int__string_string_string_int());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_create_org_group,\n                                     \"create_org_group\",\n                                 searpc_signature_int__int_string_string_int());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_remove_group,\n                                     \"remove_group\",\n                                     searpc_signature_int__int());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_group_add_member,\n                                     \"group_add_member\",\n                                     searpc_signature_int__int_string_string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_group_remove_member,\n       
                              \"group_remove_member\",\n                                     searpc_signature_int__int_string_string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_group_set_admin,\n                                     \"group_set_admin\",\n                                     searpc_signature_int__int_string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_group_unset_admin,\n                                     \"group_unset_admin\",\n                                     searpc_signature_int__int_string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_set_group_name,\n                                     \"set_group_name\",\n                                     searpc_signature_int__int_string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_quit_group,\n                                     \"quit_group\",\n                                     searpc_signature_int__int_string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_get_groups,\n                                     \"get_groups\",\n                                     searpc_signature_objlist__string_int());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                      ccnet_rpc_list_all_departments,\n                                     \"list_all_departments\",\n                                     searpc_signature_objlist__void());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_get_all_groups,\n                                     \"get_all_groups\",\n                                     
searpc_signature_objlist__int_int_string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_get_ancestor_groups,\n                                     \"get_ancestor_groups\",\n                                     searpc_signature_objlist__int());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_get_group,\n                                     \"get_group\",\n                                     searpc_signature_object__int());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_get_group_members,\n                                     \"get_group_members\",\n                                     searpc_signature_objlist__int_int_int());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_get_members_with_prefix,\n                                     \"get_members_with_prefix\",\n                                     searpc_signature_objlist__int_string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_check_group_staff,\n                                     \"check_group_staff\",\n                                     searpc_signature_int__int_string_int());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_remove_group_user,\n                                     \"remove_group_user\",\n                                     searpc_signature_int__string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_is_group_user,\n                                     \"is_group_user\",\n                                     searpc_signature_int__int_string_int());\n    searpc_server_register_function 
(\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_set_group_creator,\n                                     \"set_group_creator\",\n                                     searpc_signature_int__int_string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_search_groups,\n                                     \"search_groups\",\n                                     searpc_signature_objlist__string_int_int());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_search_group_members,\n                                     \"search_group_members\",\n                                     searpc_signature_objlist__int_string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_get_top_groups,\n                                     \"get_top_groups\",\n                                     searpc_signature_objlist__int());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_get_child_groups,\n                                     \"get_child_groups\",\n                                     searpc_signature_objlist__int());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_get_descendants_groups,\n                                     \"get_descendants_groups\",\n                                     searpc_signature_objlist__int());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_get_groups_members,\n                                     \"get_groups_members\",\n                                     searpc_signature_objlist__string());\n    /*org management*/\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                  
                   ccnet_rpc_create_org,\n                                     \"create_org\",\n                                     searpc_signature_int__string_string_string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_remove_org,\n                                     \"remove_org\",\n                                     searpc_signature_int__int());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_get_all_orgs,\n                                     \"get_all_orgs\",\n                                     searpc_signature_objlist__int_int());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_count_orgs,\n                                     \"count_orgs\",\n                                     searpc_signature_int64__void());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_get_org_by_url_prefix,\n                                     \"get_org_by_url_prefix\",\n                                     searpc_signature_object__string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_get_org_by_id,\n                                     \"get_org_by_id\",\n                                     searpc_signature_object__int());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_add_org_user,\n                                     \"add_org_user\",\n                                     searpc_signature_int__int_string_int());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_remove_org_user,\n                                     \"remove_org_user\",\n                                     
searpc_signature_int__int_string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_get_orgs_by_user,\n                                     \"get_orgs_by_user\",\n                                     searpc_signature_objlist__string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_get_org_emailusers,\n                                     \"get_org_emailusers\",\n                                     searpc_signature_objlist__string_int_int());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_add_org_group,\n                                     \"add_org_group\",\n                                     searpc_signature_int__int_int());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_remove_org_group,\n                                     \"remove_org_group\",\n                                     searpc_signature_int__int_int());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_is_org_group,\n                                     \"is_org_group\",\n                                     searpc_signature_int__int());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_get_org_id_by_group,\n                                     \"get_org_id_by_group\",\n                                     searpc_signature_int__int());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_get_org_groups,\n                                     \"get_org_groups\",\n                                     searpc_signature_objlist__int_int_int());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n  
                                   ccnet_rpc_get_org_groups_by_user,\n                                     \"get_org_groups_by_user\",\n                                     searpc_signature_objlist__string_int());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_get_org_top_groups,\n                                     \"get_org_top_groups\",\n                                     searpc_signature_objlist__int());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_org_user_exists,\n                                     \"org_user_exists\",\n                                     searpc_signature_int__int_string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_is_org_staff,\n                                     \"is_org_staff\",\n                                     searpc_signature_int__int_string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_set_org_staff,\n                                     \"set_org_staff\",\n                                     searpc_signature_int__int_string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_unset_org_staff,\n                                     \"unset_org_staff\",\n                                     searpc_signature_int__int_string());\n    searpc_server_register_function (\"seafserv-threaded-rpcserver\",\n                                     ccnet_rpc_set_org_name,\n                                     \"set_org_name\",\n                                     searpc_signature_int__int_string());\n\n    if (rpc_pipe_path) {\n        pipe_path = g_build_path (\"/\", rpc_pipe_path, SEAFILE_RPC_PIPE_NAME, NULL);\n    } else {\n        pipe_path = g_build_path (\"/\", 
seafile_dir, SEAFILE_RPC_PIPE_NAME, NULL);\n    }\n    rpc_server = searpc_create_named_pipe_server_with_threadpool (pipe_path, NAMED_PIPE_SERVER_THREAD_POOL_SIZE);\n\n    g_free(pipe_path);\n    if (!rpc_server) {\n        seaf_warning (\"Failed to create rpc server.\\n\");\n        exit (1);\n    }\n\n    rpc_server->use_epoll = TRUE;\n\n    if (searpc_named_pipe_server_start(rpc_server) < 0) {\n        seaf_warning (\"Failed to start rpc server.\\n\");\n        exit (1);\n    }\n}\n\nstatic struct event sigusr1;\n\nstatic void sigusr1Handler (int fd, short event, void *user_data)\n{\n    seafile_log_reopen ();\n}\n\nstatic void\nset_signal_handlers (SeafileSession *session)\n{\n#ifndef WIN32\n    signal (SIGPIPE, SIG_IGN);\n\n    /* design as reopen log */\n    event_set(&sigusr1, SIGUSR1, EV_SIGNAL | EV_PERSIST, sigusr1Handler, NULL);\n    event_add(&sigusr1, NULL);\n#endif\n}\n\nstatic void\nremove_pidfile (const char *pidfile)\n{\n    if (pidfile) {\n        g_unlink (pidfile);\n    }\n}\n\nstatic int\nwrite_pidfile (const char *pidfile_path)\n{\n    if (!pidfile_path)\n        return -1;\n\n    pid_t pid = getpid();\n\n    FILE *pidfile = g_fopen(pidfile_path, \"w\");\n    if (!pidfile) {\n        seaf_warning (\"Failed to fopen() pidfile %s: %s\\n\",\n                      pidfile_path, strerror(errno));\n        return -1;\n    }\n\n    char buf[32];\n    snprintf (buf, sizeof(buf), \"%d\\n\", pid);\n    if (fputs(buf, pidfile) < 0) {\n        seaf_warning (\"Failed to write pidfile %s: %s\\n\",\n                      pidfile_path, strerror(errno));\n        fclose (pidfile);\n        return -1;\n    }\n\n    fflush (pidfile);\n    fclose (pidfile);\n    return 0;\n}\n\nstatic void\non_seaf_server_exit(void)\n{\n    if (pidfile)\n        remove_pidfile (pidfile);\n}\n\n#ifdef WIN32\n/* Get the commandline arguments in unicode, then convert them to utf8  */\nstatic char **\nget_argv_utf8 (int *argc)\n{\n    int i = 0;\n    char **argv = NULL;\n    const 
wchar_t *cmdline = NULL;\n    wchar_t **argv_w = NULL;\n\n    cmdline = GetCommandLineW();\n    argv_w = CommandLineToArgvW (cmdline, argc);\n    if (!argv_w) {\n        printf(\"failed to CommandLineToArgvW(), GLE=%lu\\n\", GetLastError());\n        return NULL;\n    }\n\n    argv = (char **)malloc (sizeof(char*) * (*argc));\n    for (i = 0; i < *argc; i++) {\n        argv[i] = wchar_to_utf8 (argv_w[i]);\n    }\n\n    return argv;\n}\n#endif\n\nint\ntest_seafile_config(const char *central_config_dir, const char *config_dir, const char *seafile_dir)\n{\n#if !GLIB_CHECK_VERSION(2, 36, 0)\n    g_type_init ();\n#endif\n\n    config_dir = ccnet_expand_path (config_dir);\n    if (central_config_dir) {\n        central_config_dir = ccnet_expand_path (central_config_dir);\n    }\n\n    seafile_log_init (\"-\", \"debug\", \"debug\", \"seaf-server\");\n\n    srand (time(NULL));\n\n    event_init ();\n\n    seaf = seafile_session_new (central_config_dir, seafile_dir, config_dir);\n    if (!seaf) {\n        seaf_error (\"Error: failed to create ccnet session\\n\");\n        return -1;\n    }\n\n    if (seafile_session_init (seaf) < 0)\n        return -1;\n\n    return 0;\n}\n\nint\nmain (int argc, char **argv)\n{\n    int c;\n    char *ccnet_dir = DEFAULT_CONFIG_DIR;\n    char *seafile_dir = NULL;\n    char *central_config_dir = NULL;\n    char *logfile = NULL;\n    char *rpc_pipe_path = NULL;\n    const char *debug_str = NULL;\n    int daemon_mode = 1;\n    gboolean test_config = FALSE;\n    char *repo_id = NULL;\n\n#ifdef WIN32\n    argv = get_argv_utf8 (&argc);\n#endif\n\n    while ((c = getopt_long (argc, argv, short_options, \n                             long_options, NULL)) != EOF)\n    {\n        switch (c) {\n        case 'h':\n            exit (1);\n            break;\n        case 'v':\n            exit (1);\n            break;\n        case 'c':\n            ccnet_dir = optarg;\n            break;\n        case 'd':\n            seafile_dir = g_strdup(optarg);\n   
         break;\n        case 'F':\n            central_config_dir = g_strdup(optarg);\n            break;\n        case 'f':\n            daemon_mode = 0;\n            break;\n        case 'l':\n            logfile = g_strdup(optarg);\n            break;\n        case 'D':\n            debug_str = optarg;\n            break;\n        case 'P':\n            pidfile = optarg;\n            break;\n        case 'p':\n            rpc_pipe_path = g_strdup (optarg);\n            break;\n        case 't':\n            test_config = TRUE;\n            break;\n        case 'r':\n            repo_id = g_strdup (optarg);\n            break;\n        default:\n            usage ();\n            exit (1);\n        }\n    }\n\n    argc -= optind;\n    argv += optind;\n\n    if (test_config) {\n        return test_seafile_config (central_config_dir, ccnet_dir, seafile_dir);\n    }\n\n    const char *log_to_stdout_env = g_getenv(\"SEAFILE_LOG_TO_STDOUT\");\n    if (g_strcmp0 (log_to_stdout_env, \"true\") == 0) {\n        daemon_mode = 0;\n    }\n\n#ifndef WIN32\n    if (daemon_mode) {\n#ifndef __APPLE__\n        daemon (1, 0);\n#else   /* __APPLE */\n        /* daemon is deprecated under APPLE\n         * use fork() instead\n         * */\n        switch (fork ()) {\n          case -1:\n              seaf_warning (\"Failed to daemonize\");\n              exit (-1);\n              break;\n          case 0:\n              /* all good*/\n              break;\n          default:\n              /* kill origin process */\n              exit (0);\n        }\n#endif  /* __APPLE */\n    }\n#endif /* !WIN32 */\n\n    cdc_init ();\n\n#if !GLIB_CHECK_VERSION(2, 35, 0)\n    g_type_init();\n#endif\n#if !GLIB_CHECK_VERSION(2,32,0)\n    g_thread_init (NULL);\n#endif\n\n    if (!debug_str)\n        debug_str = g_getenv(\"SEAFILE_DEBUG\");\n    seafile_debug_set_flags_string (debug_str);\n\n    if (seafile_dir == NULL)\n        seafile_dir = g_build_filename (ccnet_dir, \"seafile\", NULL);\n    if 
(logfile == NULL)\n        logfile = g_build_filename (seafile_dir, \"seafile.log\", NULL);\n\n    if (seafile_log_init (logfile, \"info\", \"debug\", \"seaf-server\") < 0) {\n        seaf_warning (\"Failed to init log.\\n\");\n        exit (1);\n    }\n\n    event_init ();\n\n    if (repo_id) {\n        seaf = seafile_repair_session_new (central_config_dir, seafile_dir, ccnet_dir);\n        if (!seaf) {\n            seaf_warning (\"Failed to create repair seafile session.\\n\");\n            exit (1);\n        }\n        seaf_repo_manager_repair_virtual_repo (repo_id);\n        exit (0);\n    }\n\n    seaf = seafile_session_new (central_config_dir, seafile_dir, ccnet_dir);\n    if (!seaf) {\n        seaf_warning (\"Failed to create seafile session.\\n\");\n        exit (1);\n    }\n\n\n#ifndef WIN32\n    set_syslog_config (seaf->config);\n#endif\n\n    set_signal_handlers (seaf);\n\n    /* Create pid file before connecting to database.\n     * Connecting to database and creating tables may take long if the db\n     * is on a remote host. This may make controller think seaf-server fails\n     * to start and restart it.\n     */\n    if (pidfile) {\n        if (write_pidfile (pidfile) < 0) {\n            ccnet_message (\"Failed to write pidfile\\n\");\n            return -1;\n        }\n    }\n\n    /* init seaf */\n    if (seafile_session_init (seaf) < 0)\n        exit (1);\n\n    if (seafile_session_start (seaf) < 0)\n        exit (1);\n\n    start_rpc_service (seafile_dir, rpc_pipe_path);\n    g_free (seafile_dir);\n    g_free (logfile);\n    g_free (rpc_pipe_path);\n\n    atexit (on_seaf_server_exit);\n\n    /* Create a system default repo to contain the tutorial file. */\n    schedule_create_system_default_repo (seaf);\n\n    event_dispatch ();\n\n    return 0;\n}\n"
  },
  {
    "path": "server/seafile-session.c",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#include \"common.h\"\n\n#include <stdint.h>\n#include <dirent.h>\n#include <unistd.h>\n#include <sys/types.h>\n#include <sys/stat.h>\n#include <fcntl.h>\n\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <errno.h>\n\n#include <glib.h>\n\n#include \"utils.h\"\n\n#include \"seafile-session.h\"\n\n#include \"mq-mgr.h\"\n#include \"seaf-db.h\"\n#include \"seaf-utils.h\"\n\n#include \"log.h\"\n\n#define CONNECT_INTERVAL_MSEC 10 * 1000\n\n#define DEFAULT_THREAD_POOL_SIZE 500\n\n#define DEFAULT_FIXED_BLOCK_SIZE ((gint64)1 << 23) /* 8MB */\n\nstatic void\nload_fileserver_config (SeafileSession *session)\n{\n    int web_token_expire_time;\n    int max_index_processing_threads;\n    int fixed_block_size_mb;\n    int max_indexing_threads;\n    gint64 max_upload_size;\n\n    web_token_expire_time = g_key_file_get_integer (session->config,\n                                                    \"fileserver\", \"web_token_expire_time\",\n                                                    NULL);\n    if (web_token_expire_time <= 0) {\n        session->web_token_expire_time = 3600;\n    } else {\n        session->web_token_expire_time = web_token_expire_time;\n    }\n\n    seaf_message (\"fileserver: web_token_expire_time = %d\\n\",\n                  session->web_token_expire_time);\n\n    max_index_processing_threads = g_key_file_get_integer (session->config,\n                                                           \"fileserver\", \"max_index_processing_threads\",\n                                                           NULL);\n    if (max_index_processing_threads <= 0) {\n        session->max_index_processing_threads = 3;\n    } else {\n        session->max_index_processing_threads = max_index_processing_threads;\n    }\n\n    seaf_message (\"fileserver: max_index_processing_threads= %d\\n\",\n                  
session->max_index_processing_threads);\n\n\n    fixed_block_size_mb = g_key_file_get_integer (session->config,\n                                                  \"fileserver\", \"fixed_block_size\",\n                                                  NULL);\n    if (fixed_block_size_mb <= 0){\n        session->fixed_block_size = DEFAULT_FIXED_BLOCK_SIZE;\n    } else {\n        session->fixed_block_size = fixed_block_size_mb * ((gint64)1 << 20);\n    }\n\n    seaf_message (\"fileserver: fixed_block_size = %\"G_GINT64_FORMAT\"\\n\",\n                  session->fixed_block_size);\n\n    max_indexing_threads = g_key_file_get_integer (session->config,\n                                                   \"fileserver\", \"max_indexing_threads\",\n                                                   NULL);\n    if (max_indexing_threads <= 0) {\n        session->max_indexing_threads = 1;\n    } else {\n        session->max_indexing_threads = max_indexing_threads;\n    }\n\n    seaf_message (\"fileserver: max_indexing_threads = %d\\n\",\n                  session->max_indexing_threads);\n\n    GError *err = NULL;\n    max_upload_size = g_key_file_get_int64(session->config, \"fileserver\", \"max_upload_size\", &err);\n    if (err) {\n        max_upload_size = -1;\n        g_clear_error(&err);\n    } else if (max_upload_size > 0) {\n        max_upload_size = max_upload_size * 1000000;\n    }\n    session->max_upload_size = max_upload_size;\n\n    seaf_message (\"fileserver: max_upload_size = %\"G_GINT64_FORMAT\"\\n\",\n                  session->max_upload_size);\n\n    return;\n}\n\nstatic int\nload_config (SeafileSession *session, const char *config_file_path)\n{\n    int ret = 0;\n    GError *error = NULL;\n    GKeyFile *config = NULL;\n    const char *notif_server = NULL;\n    const char *enable_notif_server = NULL;\n    const char *private_key = NULL;\n    const char *site_root = NULL;\n    const char *log_to_stdout = NULL;\n    const char *node_name = NULL;\n    const char 
*use_go_fileserver = NULL;\n\n    config = g_key_file_new ();\n    if (!g_key_file_load_from_file (config, config_file_path,\n                                    G_KEY_FILE_NONE, &error)) {\n        seaf_warning (\"Failed to load config file.\\n\");\n        ret = -1;\n        goto out;\n    }\n\n    session->config = config;\n\n    session->cloud_mode = g_key_file_get_boolean (config,\n                                                  \"general\", \"cloud_mode\",\n                                                  NULL);\n\n    session->go_fileserver = g_key_file_get_boolean (config,\n                                                     \"fileserver\", \"use_go_fileserver\",\n                                                     NULL);\n\n    session->obj_cache = objcache_new ();\n\n    // Read config from env\n    private_key = g_getenv(\"JWT_PRIVATE_KEY\");\n    site_root = g_getenv(\"SITE_ROOT\");\n    log_to_stdout = g_getenv(\"SEAFILE_LOG_TO_STDOUT\");\n    notif_server = g_getenv(\"INNER_NOTIFICATION_SERVER_URL\");\n    enable_notif_server = g_getenv(\"ENABLE_NOTIFICATION_SERVER\");\n    node_name = g_getenv(\"NODE_NAME\");\n    use_go_fileserver = g_getenv(\"ENABLE_GO_FILESERVER\");\n\n    if (!private_key) {\n        seaf_warning (\"Failed to read JWT_PRIVATE_KEY.\\n\");\n        ret = -1;\n        goto out;\n    }\n    if ((notif_server && g_strcmp0 (notif_server, \"\") != 0) &&\n        (enable_notif_server && g_strcmp0 (enable_notif_server, \"true\") == 0)) {\n        session->notif_server_private_key = g_strdup (private_key);\n        session->notif_url = g_strdup (notif_server);\n    }\n    session->seahub_pk = g_strdup (private_key);\n    if (!site_root || g_strcmp0 (site_root, \"\") == 0) {\n        site_root = \"/\";\n    }\n    session->seahub_url = g_strdup_printf(\"http://127.0.0.1:8000%sapi/v2.1/internal\", site_root);\n    session->seahub_conn_pool = connection_pool_new ();\n\n    if (g_strcmp0 (log_to_stdout, \"true\") == 0) {\n        
session->log_to_stdout = TRUE;\n    }\n\n    if (!node_name || g_strcmp0 (node_name, \"\") == 0) {\n        node_name = \"default\";\n    }\n    session->node_name = g_strdup (node_name);\n\n    if (use_go_fileserver && g_strcmp0 (use_go_fileserver, \"true\") == 0) {\n        session->go_fileserver = TRUE; \n    }\n\nout:\n    if (ret < 0) {\n        if (config)\n            g_key_file_free (config);\n    }\n    return ret;\n}\n\nSeafileSession *\nseafile_session_new(const char *central_config_dir,\n                    const char *seafile_dir,\n                    const char *ccnet_dir)\n{\n    char *abs_central_config_dir = NULL;\n    char *abs_seafile_dir;\n    char *abs_ccnet_dir = NULL;\n    char *tmp_file_dir;\n    char *config_file_path = NULL;\n    SeafileSession *session = NULL;\n\n    abs_ccnet_dir = ccnet_expand_path (ccnet_dir);\n    abs_seafile_dir = ccnet_expand_path (seafile_dir);\n    tmp_file_dir = g_build_filename (abs_seafile_dir, \"tmpfiles\", NULL);\n    if (central_config_dir) {\n        abs_central_config_dir = ccnet_expand_path (central_config_dir);\n    }\n\n    if (checkdir_with_mkdir (abs_seafile_dir) < 0) {\n        seaf_warning (\"Config dir %s does not exist and is unable to create\\n\",\n                   abs_seafile_dir);\n        goto onerror;\n    }\n\n    if (checkdir_with_mkdir (tmp_file_dir) < 0) {\n        seaf_warning (\"Temp file dir %s does not exist and is unable to create\\n\",\n                   tmp_file_dir);\n        goto onerror;\n    }\n\n    if (checkdir_with_mkdir (abs_ccnet_dir) < 0) {\n        seaf_warning (\"Ccnet config dir %s does not exist and is unable to create\\n\",\n                   abs_ccnet_dir);\n        goto onerror;\n    }\n\n    config_file_path = g_build_filename(\n        abs_central_config_dir ? 
abs_central_config_dir : abs_seafile_dir,\n        \"seafile.conf\", NULL);\n\n    session = g_new0(SeafileSession, 1);\n    session->seaf_dir = abs_seafile_dir;\n    session->ccnet_dir = abs_ccnet_dir;\n    session->tmp_file_dir = tmp_file_dir;\n\n    if (load_config (session, config_file_path) < 0) {\n        goto onerror;\n    }\n\n    load_fileserver_config (session);\n\n    if (load_database_config (session) < 0) {\n        seaf_warning (\"Failed to load database config.\\n\");\n        goto onerror;\n    }\n\n    if (load_ccnet_database_config (session) < 0) {\n        seaf_warning (\"Failed to load ccnet database config.\\n\");\n        goto onerror;\n    }\n\n    session->cfg_mgr = seaf_cfg_manager_new (session);\n    if (!session->cfg_mgr)\n        goto onerror;\n    session->fs_mgr = seaf_fs_manager_new (session, abs_seafile_dir);\n    if (!session->fs_mgr)\n        goto onerror;\n    session->block_mgr = seaf_block_manager_new (session, abs_seafile_dir);\n    if (!session->block_mgr)\n        goto onerror;\n    session->commit_mgr = seaf_commit_manager_new (session);\n    if (!session->commit_mgr)\n        goto onerror;\n    session->repo_mgr = seaf_repo_manager_new (session);\n    if (!session->repo_mgr)\n        goto onerror;\n    session->branch_mgr = seaf_branch_manager_new (session);\n    if (!session->branch_mgr)\n        goto onerror;\n\n    session->share_mgr = seaf_share_manager_new (session);\n    if (!session->share_mgr)\n        goto onerror;\n    \n    session->web_at_mgr = seaf_web_at_manager_new (session);\n    if (!session->web_at_mgr)\n        goto onerror;\n\n    session->passwd_mgr = seaf_passwd_manager_new (session);\n    if (!session->passwd_mgr)\n        goto onerror;\n\n    session->quota_mgr = seaf_quota_manager_new (session);\n    if (!session->quota_mgr)\n        goto onerror;\n\n    session->copy_mgr = seaf_copy_manager_new (session);\n    if (!session->copy_mgr)\n        goto onerror;\n\n    session->job_mgr = 
ccnet_job_manager_new (DEFAULT_THREAD_POOL_SIZE);\n\n    session->size_sched = size_scheduler_new (session);\n\n    session->mq_mgr = seaf_mq_manager_new ();\n    if (!session->mq_mgr)\n        goto onerror;\n\n#ifdef HAVE_EVHTP\n    session->http_server = seaf_http_server_new (session);\n    if (!session->http_server)\n        goto onerror;\n\n    session->zip_download_mgr = zip_download_mgr_new ();\n    if (!session->zip_download_mgr)\n        goto onerror;\n#endif\n\n    session->index_blocks_mgr = index_blocks_mgr_new (session);\n    if (!session->index_blocks_mgr)\n        goto onerror;\n\n    session->user_mgr = ccnet_user_manager_new (session);\n    if (!session->user_mgr)\n        goto onerror;\n\n    session->group_mgr = ccnet_group_manager_new (session);\n    if (!session->group_mgr)\n        goto onerror;\n\n    session->org_mgr = ccnet_org_manager_new (session);\n    if (!session->org_mgr)\n        goto onerror;\n\n    if (session->notif_url) {\n        session->notif_mgr = seaf_notif_manager_new (session, session->notif_url);\n        if (!session->notif_mgr) {\n            goto onerror;\n        }\n    }\n\n    session->metric_mgr = seaf_metric_manager_new (session);\n    if (!session->metric_mgr)\n        goto onerror;\n\n    return session;\n\nonerror:\n    g_free (config_file_path);\n    free (abs_seafile_dir);\n    free (abs_ccnet_dir);\n    g_free (tmp_file_dir);\n    g_free (session);\n    return NULL;    \n}\n\nSeafileSession *\nseafile_repair_session_new(const char *central_config_dir,\n                           const char *seafile_dir,\n                           const char *ccnet_dir)\n{\n    char *abs_central_config_dir = NULL;\n    char *abs_seafile_dir;\n    char *abs_ccnet_dir = NULL;\n    char *tmp_file_dir;\n    char *config_file_path;\n    GKeyFile *config;\n    SeafileSession *session = NULL;\n    gboolean notif_enabled = FALSE;\n    int notif_port = 8083;\n    gboolean cluster_mode;\n    gboolean use_block_cache;\n    int 
block_cache_size_limit;\n    char **block_cache_file_types;\n    gint64 repo_file_number_limit = -1;\n\n    abs_ccnet_dir = ccnet_expand_path (ccnet_dir);\n    abs_seafile_dir = ccnet_expand_path (seafile_dir);\n    tmp_file_dir = g_build_filename (abs_seafile_dir, \"tmpfiles\", NULL);\n    if (central_config_dir) {\n        abs_central_config_dir = ccnet_expand_path (central_config_dir);\n    }\n\n    config_file_path = g_build_filename(\n        abs_central_config_dir ? abs_central_config_dir : abs_seafile_dir,\n        \"seafile.conf\", NULL);\n\n    GError *error = NULL;\n    config = g_key_file_new ();\n    if (!g_key_file_load_from_file (config, config_file_path, \n                                    G_KEY_FILE_NONE, &error)) {\n        seaf_warning (\"Failed to load config file.\\n\");\n        g_key_file_free (config);\n        g_free (config_file_path);\n        goto onerror;\n    }\n    g_free (config_file_path);\n\n    session = g_new0(SeafileSession, 1);\n    session->seaf_dir = abs_seafile_dir;\n    session->ccnet_dir = abs_ccnet_dir;\n    session->tmp_file_dir = tmp_file_dir;\n    session->config = config;\n    session->is_repair = TRUE;\n\n    if (load_database_config (session) < 0) {\n        seaf_warning (\"Failed to load database config.\\n\");\n        goto onerror;\n    }\n\n    if (load_ccnet_database_config (session) < 0) {\n        seaf_warning (\"Failed to load ccnet database config.\\n\");\n        goto onerror;\n    }\n\n    session->fs_mgr = seaf_fs_manager_new (session, abs_seafile_dir);\n    if (!session->fs_mgr)\n        goto onerror;\n    session->block_mgr = seaf_block_manager_new (session, abs_seafile_dir);\n    if (!session->block_mgr)\n        goto onerror;\n    session->commit_mgr = seaf_commit_manager_new (session);\n    if (!session->commit_mgr)\n        goto onerror;\n    session->repo_mgr = seaf_repo_manager_new (session);\n    if (!session->repo_mgr)\n        goto onerror;\n    session->branch_mgr = seaf_branch_manager_new 
(session);\n    if (!session->branch_mgr)\n        goto onerror;\n\n    session->job_mgr = ccnet_job_manager_new (DEFAULT_THREAD_POOL_SIZE);\n\n    session->size_sched = size_scheduler_new (session);\n\n    return session;\n\nonerror:\n    free (abs_seafile_dir);\n    free (abs_ccnet_dir);\n    g_free (tmp_file_dir);\n    g_free (session);\n    return NULL;\n}\n\nint\nseafile_session_init (SeafileSession *session)\n{\n    if (seaf_commit_manager_init (session->commit_mgr) < 0)\n        return -1;\n\n    if (seaf_fs_manager_init (session->fs_mgr) < 0)\n        return -1;\n\n    if (seaf_branch_manager_init (session->branch_mgr) < 0) {\n        seaf_warning (\"Failed to init branch manager.\\n\");\n        return -1;\n    }\n\n    if (seaf_repo_manager_init (session->repo_mgr) < 0) {\n        seaf_warning (\"Failed to init repo manager.\\n\");\n        return -1;\n    }\n\n    if (seaf_quota_manager_init (session->quota_mgr) < 0) {\n        seaf_warning (\"Failed to init quota manager.\\n\");\n        return -1;\n    }\n\n    if (ccnet_user_manager_prepare (session->user_mgr) < 0) {\n        seaf_warning (\"Failed to init user manager.\\n\");\n        return -1;\n    }\n\n    if (ccnet_group_manager_prepare (session->group_mgr) < 0) {\n        seaf_warning (\"Failed to init group manager.\\n\");\n        return -1;\n    }\n\n    if (ccnet_org_manager_prepare (session->org_mgr) < 0) {\n        seaf_warning (\"Failed to init org manager.\\n\");\n        return -1;\n    }\n\n    if ((session->create_tables || seaf_db_type(session->db) == SEAF_DB_TYPE_PGSQL)\n        && seaf_cfg_manager_init (session->cfg_mgr) < 0) {\n        seaf_warning (\"Failed to init config manager.\\n\");\n        return -1;\n    }\n\n    return 0;\n}\n\nint\nseafile_session_start (SeafileSession *session)\n{\n    if (seaf_share_manager_start (session->share_mgr) < 0) {\n        seaf_warning (\"Failed to start share manager.\\n\");\n        return -1;\n    }\n\n    if (seaf_web_at_manager_start 
(session->web_at_mgr) < 0) {\n        seaf_warning (\"Failed to start web access check manager.\\n\");\n        return -1;\n    }\n\n    if (seaf_passwd_manager_start (session->passwd_mgr) < 0) {\n        seaf_warning (\"Failed to start password manager.\\n\");\n        return -1;\n    }\n\n    if (size_scheduler_start (session->size_sched) < 0) {\n        seaf_warning (\"Failed to start size scheduler.\\n\");\n        return -1;\n    }\n\n    if (seaf_copy_manager_start (session->copy_mgr) < 0) {\n        seaf_warning (\"Failed to start copy manager.\\n\");\n        return -1;\n    }\n\n    if (!session->go_fileserver) {\n#ifdef HAVE_EVHTP\n        if (seaf_http_server_start (session->http_server) < 0) {\n            seaf_warning (\"Failed to start http server thread.\\n\");\n            return -1;\n        }\n#else\n        seaf_warning (\"Failed to start http server thread, please use go fileserver.\\n\");\n        return -1;\n#endif\n    }\n\n    if (seaf_metric_manager_start (session->metric_mgr) < 0) {\n        seaf_warning (\"Failed to start metric manager.\\n\");\n        return -1;\n    }\n\n    return 0;\n}\n\nchar *\nget_system_default_repo_id (SeafileSession *session)\n{\n    char *sql = \"SELECT info_value FROM SystemInfo WHERE info_key='default_repo_id'\";\n    return seaf_db_get_string (session->db, sql);\n}\n\nint\nset_system_default_repo_id (SeafileSession *session, const char *repo_id)\n{\n    char sql[256];\n    snprintf (sql, sizeof(sql),\n              \"INSERT INTO SystemInfo (info_key, info_value) VALUES ('default_repo_id', '%s')\",\n              repo_id);\n    return seaf_db_query (session->db, sql);\n}\n\nstatic int\ndel_system_default_repo_id (SeafileSession *session)\n{\n    const char *sql = \"DELETE FROM SystemInfo WHERE info_key='default_repo_id'\";\n    return seaf_db_query (session->db, sql);\n}\n\n#define DEFAULT_TEMPLATE_DIR \"library-template\"\n\nstatic void\ncopy_template_files_recursive (SeafileSession *session,\n              
                 const char *repo_id,\n                               const char *repo_dir_path,\n                               const char *dir_path)\n{\n    GDir *dir;\n    const char *name;\n    char *sub_path, *repo_sub_path;\n    SeafStat st;\n    GError *error = NULL;\n    int rc;\n\n    dir = g_dir_open (dir_path, 0, &error);\n    if (!dir) {\n        seaf_warning (\"Failed to open template dir %s: %s.\\n\",\n                      dir_path, error->message);\n        return;\n    }\n\n    while ((name = g_dir_read_name(dir)) != NULL) {\n        sub_path = g_build_filename (dir_path, name, NULL);\n        if (seaf_stat (sub_path, &st) < 0) {\n            seaf_warning (\"Failed to stat %s: %s.\\n\", sub_path, strerror(errno));\n            g_free (sub_path);\n            continue;\n        }\n\n        if (S_ISREG(st.st_mode)) {\n            rc = seaf_repo_manager_post_file (session->repo_mgr,\n                                              repo_id,\n                                              sub_path,\n                                              repo_dir_path,\n                                              name,\n                                              \"System\",\n                                              NULL);\n            if (rc < 0)\n                seaf_warning (\"Failed to add template file %s.\\n\", sub_path);\n        } else if (S_ISDIR(st.st_mode)) {\n            rc = seaf_repo_manager_post_dir (session->repo_mgr,\n                                             repo_id,\n                                             repo_dir_path,\n                                             name,\n                                             \"System\",\n                                             NULL);\n            if (rc < 0) {\n                seaf_warning (\"Failed to add template dir %s.\\n\", sub_path);\n                g_free (sub_path);\n                continue;\n            }\n\n            repo_sub_path = g_build_path (\"/\", repo_dir_path, 
name, NULL);\n            copy_template_files_recursive (session, repo_id,\n                                           repo_sub_path, sub_path);\n            g_free (repo_sub_path);\n        }\n        g_free (sub_path);\n    }\n    g_dir_close (dir);\n}\n\nstatic void *\ncreate_system_default_repo (void *data)\n{\n    SeafileSession *session = data;\n    char *repo_id;\n    char *template_path;\n\n    /* If default repo is not set or doesn't exist, create a new one. */\n    repo_id = get_system_default_repo_id (session);\n    if (repo_id != NULL) {\n        SeafRepo *repo;\n        repo = seaf_repo_manager_get_repo (session->repo_mgr, repo_id);\n        if (!repo) {\n            seaf_warning (\"Failed to get system default repo. Create a new one.\\n\");\n            del_system_default_repo_id (session);\n            seaf_repo_manager_del_repo (session->repo_mgr, repo_id, NULL);\n            g_free (repo_id);\n        } else {\n            seaf_repo_unref (repo);\n            g_free (repo_id);\n            return data;\n        }\n    }\n\n    repo_id = seaf_repo_manager_create_new_repo (session->repo_mgr,\n                                                 \"My Library Template\",\n                                                 \"Template for creating 'My Library' for users\",\n                                                 \"System\",\n                                                 NULL, -1, NULL, NULL, NULL);\n    if (!repo_id) {\n        seaf_warning (\"Failed to create system default repo.\\n\");\n        return data;\n    }\n\n    set_system_default_repo_id (session, repo_id);\n\n    template_path = g_build_filename (session->seaf_dir, DEFAULT_TEMPLATE_DIR, NULL);\n    copy_template_files_recursive (session, repo_id, \"/\", template_path);\n\n    g_free (repo_id);\n    g_free (template_path);\n    return data;\n}\n\nvoid\nschedule_create_system_default_repo (SeafileSession *session)\n{\n    int db_type = seaf_db_type (session->db);\n    char *sql;\n\n    
if (db_type == SEAF_DB_TYPE_MYSQL)\n        sql = \"CREATE TABLE IF NOT EXISTS SystemInfo \"\n        \"(id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT, \"\n        \"info_key VARCHAR(256), info_value VARCHAR(1024))\";\n    else\n        sql = \"CREATE TABLE IF NOT EXISTS SystemInfo( \"\n        \"info_key VARCHAR(256), info_value VARCHAR(1024))\";\n\n    if ((session->create_tables || db_type == SEAF_DB_TYPE_PGSQL)\n        && seaf_db_query (session->db, sql) < 0)\n        return;\n\n    ccnet_job_manager_schedule_job (session->job_mgr,\n                                    create_system_default_repo,\n                                    NULL, session);\n}\n"
  },
  {
    "path": "server/seafile-session.h",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#ifndef SEAFILE_SESSION_H\n#define SEAFILE_SESSION_H\n\n#include <job-mgr.h>\n\n#include \"block-mgr.h\"\n#include \"fs-mgr.h\"\n#include \"commit-mgr.h\"\n#include \"branch-mgr.h\"\n#include \"repo-mgr.h\"\n#include \"db.h\"\n#include \"seaf-db.h\"\n#include \"mq-mgr.h\"\n#include \"user-mgr.h\"\n#include \"group-mgr.h\"\n#include \"org-mgr.h\"\n\n#include \"share-mgr.h\"\n#include \"web-accesstoken-mgr.h\"\n#include \"passwd-mgr.h\"\n#include \"quota-mgr.h\"\n#include \"size-sched.h\"\n#include \"copy-mgr.h\"\n#include \"config-mgr.h\"\n\n#include \"http-server.h\"\n#include \"zip-download-mgr.h\"\n#include \"index-blocks-mgr.h\"\n#include \"notif-mgr.h\"\n#include \"http-tx-mgr.h\"\n#include \"obj-cache.h\"\n#include \"metric-mgr.h\"\n\n#include <searpc-client.h>\n\nstruct _CcnetClient;\n\ntypedef struct _SeafileSession SeafileSession;\n\n\nstruct _SeafileSession {\n    char                *central_config_dir;\n    char                *seaf_dir;\n    char                *ccnet_dir;\n    char                *tmp_file_dir;\n    /* Config that's only loaded on start */\n    GKeyFile            *config;\n    SeafDB              *db;\n    CcnetDB             *ccnet_db;\n    char                *seahub_pk;\n    char                *seahub_url;\n    ConnectionPool      *seahub_conn_pool;\n\n    SeafBlockManager    *block_mgr;\n    SeafFSManager       *fs_mgr;\n    SeafCommitManager   *commit_mgr;\n    SeafBranchManager   *branch_mgr;\n    SeafRepoManager     *repo_mgr;\n    SeafShareManager\t*share_mgr;\n    SeafPasswdManager   *passwd_mgr;\n    SeafQuotaManager    *quota_mgr;\n    SeafCopyManager     *copy_mgr;\n    SeafCfgManager      *cfg_mgr;\n    CcnetUserManager    *user_mgr;\n    CcnetGroupManager   *group_mgr;\n    CcnetOrgManager     *org_mgr;\n    \n    SeafWebAccessTokenManager\t*web_at_mgr;\n\n    SeafMqManager       *mq_mgr;\n    CcnetJobManager     
*job_mgr;\n\n    SizeScheduler       *size_sched;\n\n    int                  cloud_mode;\n\n#ifdef HAVE_EVHTP\n    HttpServerStruct    *http_server;\n    ZipDownloadMgr      *zip_download_mgr;\n#endif\n    IndexBlksMgr        *index_blocks_mgr;\n\n    gboolean create_tables;\n    gboolean ccnet_create_tables;\n\n    gboolean go_fileserver;\n\n    int web_token_expire_time;\n    int max_index_processing_threads;\n    gint64 fixed_block_size;\n    int max_indexing_threads;\n    gint64 max_upload_size;\n\n    // For notification server\n    NotifManager *notif_mgr;\n    char         *notif_server_private_key;\n    char         *notif_url;\n\n    // For metric\n    SeafMetricManager *metric_mgr; \n    char              *node_name;\n\n    ObjCache *obj_cache;\n\n    gboolean            log_to_stdout;\n\n    gboolean is_repair;\n};\n\nextern SeafileSession *seaf;\n\nSeafileSession *\nseafile_session_new(const char *central_config_dir, \n                    const char *seafile_dir,\n                    const char *ccnet_dir);\n\nSeafileSession *\nseafile_repair_session_new(const char *central_config_dir,\n                           const char *seafile_dir,\n                           const char *ccnet_dir);\n\nint\nseafile_session_init (SeafileSession *session);\n\nint\nseafile_session_start (SeafileSession *session);\n\nchar *\nseafile_session_get_tmp_file_path (SeafileSession *session,\n                                   const char *basename,\n                                   char path[]);\n\nvoid\nschedule_create_system_default_repo (SeafileSession *session);\n\nchar *\nget_system_default_repo_id (SeafileSession *session);\n\nint\nset_system_default_repo_id (SeafileSession *session, const char *repo_id);\n\n#endif /* SEAFILE_H */\n"
  },
  {
    "path": "server/share-mgr.c",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#include \"common.h\"\n#include \"utils.h\"\n\n#include \"log.h\"\n\n#include \"seafile-session.h\"\n#include \"share-mgr.h\"\n\n#include \"seaf-db.h\"\n#include \"log.h\"\n#include \"seafile-error.h\"\n\nSeafShareManager *\nseaf_share_manager_new (SeafileSession *seaf)\n{\n    SeafShareManager *mgr = g_new0 (SeafShareManager, 1);\n\n    mgr->seaf = seaf;\n\n    return mgr;\n}\n\nint\nseaf_share_manager_start (SeafShareManager *mgr)\n{\n    if (!mgr->seaf->create_tables && seaf_db_type (mgr->seaf->db) != SEAF_DB_TYPE_PGSQL)\n        return 0;\n\n    SeafDB *db = mgr->seaf->db;\n    const char *sql;\n\n    int db_type = seaf_db_type (db);\n    if (db_type == SEAF_DB_TYPE_MYSQL) {\n        sql = \"CREATE TABLE IF NOT EXISTS SharedRepo \"\n            \"(id BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT,\"\n            \"repo_id CHAR(37) , from_email VARCHAR(255), to_email VARCHAR(255), \"\n            \"permission CHAR(15), INDEX (repo_id), \"\n            \"INDEX(from_email), INDEX(to_email)) ENGINE=INNODB\";\n\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n    } else if (db_type == SEAF_DB_TYPE_SQLITE) {\n        sql = \"CREATE TABLE IF NOT EXISTS SharedRepo \"\n            \"(repo_id CHAR(37) , from_email VARCHAR(255), to_email VARCHAR(255), \"\n            \"permission CHAR(15))\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n        sql = \"CREATE INDEX IF NOT EXISTS RepoIdIndex on SharedRepo (repo_id)\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n        sql = \"CREATE INDEX IF NOT EXISTS FromEmailIndex on SharedRepo (from_email)\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n        sql = \"CREATE INDEX IF NOT EXISTS ToEmailIndex on SharedRepo (to_email)\";\n        if (seaf_db_query (db, sql) < 0)\n            return -1;\n    }\n    /* else if (db_type == SEAF_DB_TYPE_PGSQL) { 
*/\n    /*     sql = \"CREATE TABLE IF NOT EXISTS SharedRepo \" */\n    /*         \"(repo_id CHAR(36) , from_email VARCHAR(255), to_email VARCHAR(255), \" */\n    /*         \"permission VARCHAR(15))\"; */\n    /*     if (seaf_db_query (db, sql) < 0) */\n    /*         return -1; */\n\n    /*     if (!pgsql_index_exists (db, \"sharedrepo_repoid_idx\")) { */\n    /*         sql = \"CREATE INDEX sharedrepo_repoid_idx ON SharedRepo (repo_id)\"; */\n    /*         if (seaf_db_query (db, sql) < 0) */\n    /*             return -1; */\n    /*     } */\n    /*     if (!pgsql_index_exists (db, \"sharedrepo_from_email_idx\")) { */\n    /*         sql = \"CREATE INDEX sharedrepo_from_email_idx ON SharedRepo (from_email)\"; */\n    /*         if (seaf_db_query (db, sql) < 0) */\n    /*             return -1; */\n    /*     } */\n    /*     if (!pgsql_index_exists (db, \"sharedrepo_to_email_idx\")) { */\n    /*         sql = \"CREATE INDEX sharedrepo_to_email_idx ON SharedRepo (to_email)\"; */\n    /*         if (seaf_db_query (db, sql) < 0) */\n    /*             return -1; */\n    /*     } */\n    /* } */\n    \n    return 0;\n}\n\nint\nseaf_share_manager_add_share (SeafShareManager *mgr, const char *repo_id,\n                              const char *from_email, const char *to_email,\n                              const char *permission)\n{\n    gboolean db_err = FALSE;\n    int ret = 0;\n\n    char *from_email_l = g_ascii_strdown (from_email, -1);\n    char *to_email_l = g_ascii_strdown (to_email, -1);\n\n    if (seaf_db_statement_exists (mgr->seaf->db,\n                                  \"SELECT repo_id from SharedRepo \"\n                                  \"WHERE repo_id=? AND \"\n                                  \"from_email=? 
AND to_email=?\",\n                                  &db_err, 3, \"string\", repo_id,\n                                  \"string\", from_email_l, \"string\", to_email_l))\n        goto out;\n\n    if (seaf_db_statement_query (mgr->seaf->db,\n                                 \"INSERT INTO SharedRepo (repo_id, from_email, \"\n                                 \"to_email, permission) VALUES (?, ?, ?, ?)\",\n                                 4, \"string\", repo_id, \"string\", from_email_l,\n                                 \"string\", to_email_l, \"string\", permission) < 0) {\n        ret = -1;\n        goto out;\n    }\n\nout:\n    g_free (from_email_l);\n    g_free (to_email_l);\n    return ret;\n}\n\nint\nseaf_share_manager_set_subdir_perm_by_path (SeafShareManager *mgr, const char *repo_id,\n                                           const char *from_email, const char *to_email,\n                                           const char *permission, const char *path)\n{\n    char *sql;\n    int ret;\n\n    char *from_email_l = g_ascii_strdown (from_email, -1);\n    char *to_email_l = g_ascii_strdown (to_email, -1);\n    sql = \"UPDATE SharedRepo SET permission=? WHERE repo_id IN \"\n          \"(SELECT repo_id FROM VirtualRepo WHERE origin_repo=? AND path=?) \"\n          \"AND from_email=? 
AND to_email=?\";\n\n    ret = seaf_db_statement_query (mgr->seaf->db, sql,\n                                   5, \"string\", permission,\n                                   \"string\", repo_id,\n                                   \"string\", path,\n                                   \"string\", from_email_l,\n                                   \"string\", to_email_l);\n    g_free (from_email_l);\n    g_free (to_email_l);\n    return ret;\n}\n\nint\nseaf_share_manager_set_permission (SeafShareManager *mgr, const char *repo_id,\n                                   const char *from_email, const char *to_email,\n                                   const char *permission)\n{\n    char *sql;\n    int ret;\n\n    char *from_email_l = g_ascii_strdown (from_email, -1);\n    char *to_email_l = g_ascii_strdown (to_email, -1);\n    sql = \"UPDATE SharedRepo SET permission=? WHERE \"\n        \"repo_id=? AND from_email=? AND to_email=?\";\n\n    ret = seaf_db_statement_query (mgr->seaf->db, sql,\n                                   4, \"string\", permission, \"string\", repo_id,\n                                   \"string\", from_email_l, \"string\", to_email_l);\n\n    g_free (from_email_l);\n    g_free (to_email_l);\n    return ret;\n}\n\nstatic gboolean\ncollect_repos (SeafDBRow *row, void *data)\n{\n    GList **p_repos = data;\n    const char *repo_id;\n    const char *vrepo_id;\n    const char *email;\n    const char *permission;\n    const char *commit_id;\n    gint64 size;\n    SeafileRepo *repo;\n\n    repo_id = seaf_db_row_get_column_text (row, 0);\n    vrepo_id = seaf_db_row_get_column_text (row, 1);\n    email = seaf_db_row_get_column_text (row, 2);\n    permission = seaf_db_row_get_column_text (row, 3);\n    commit_id = seaf_db_row_get_column_text (row, 4);\n    size = seaf_db_row_get_column_int64 (row, 5);\n    const char *repo_name = seaf_db_row_get_column_text (row, 8);\n    gint64 update_time = seaf_db_row_get_column_int64 (row, 9);\n    int version = 
seaf_db_row_get_column_int (row, 10); \n    gboolean is_encrypted = seaf_db_row_get_column_int (row, 11) ? TRUE : FALSE;\n    const char *last_modifier = seaf_db_row_get_column_text (row, 12);\n    int status = seaf_db_row_get_column_int (row, 13);\n    const char *type = seaf_db_row_get_column_text (row, 14);\n    const char *origin_repo_name = seaf_db_row_get_column_text (row, 15);\n\n    char *email_l = g_ascii_strdown (email, -1);\n\n    repo = g_object_new (SEAFILE_TYPE_REPO,\n                         \"share_type\", \"personal\",\n                         \"repo_id\", repo_id,\n                         \"id\", repo_id,\n                         \"head_cmmt_id\", commit_id,\n                         \"user\", email_l,\n                         \"permission\", permission,\n                         \"is_virtual\", (vrepo_id != NULL),\n                         \"size\", size,\n                         \"status\", status,\n                         NULL);\n    g_free (email_l);\n\n    if (repo) {\n        if (vrepo_id) {\n            const char *origin_repo_id = seaf_db_row_get_column_text (row, 6);\n            const char *origin_path = seaf_db_row_get_column_text (row, 7);\n            g_object_set (repo, \"store_id\", origin_repo_id,\n                          \"origin_repo_id\", origin_repo_id,\n                          \"origin_repo_name\", origin_repo_name,\n                          \"origin_path\", origin_path, NULL);\n        } else {\n            g_object_set (repo, \"store_id\", repo_id, NULL);\n        }\n        if (repo_name) {\n            g_object_set (repo, \"name\", repo_name,\n                          \"repo_name\", repo_name,\n                          \"last_modify\", update_time,\n                          \"last_modified\", update_time,\n                          \"version\", version,\n                          \"encrypted\", is_encrypted,\n                          \"last_modifier\", last_modifier, NULL);\n        }\n        if (type) {\n  
          g_object_set (repo, \"repo_type\", type, NULL);\n        }\n        *p_repos = g_list_prepend (*p_repos, repo);\n    }\n\n    return TRUE;\n}\n\nstatic void\nseaf_fill_repo_commit_if_not_in_db (GList **repos)\n{\n    char *repo_name = NULL;\n    char *last_modifier = NULL;\n    char *repo_id = NULL;\n    char *commit_id = NULL;\n    SeafileRepo *repo = NULL;\n    GList *p = NULL;\n\n    for (p = *repos; p;) {\n        repo = p->data;\n        g_object_get (repo, \"name\", &repo_name, NULL);\n        g_object_get (repo, \"last_modifier\", &last_modifier, NULL);\n        if (!repo_name || !last_modifier) {\n            g_object_get (repo, \"repo_id\", &repo_id,\n                          \"head_cmmt_id\", &commit_id, NULL);\n            SeafCommit *commit = seaf_commit_manager_get_commit_compatible (seaf->commit_mgr,\n                                                                            repo_id, commit_id);\n            if (!commit) {\n                seaf_warning (\"Commit %s:%s is missing\\n\", repo_id, commit_id);\n                GList *next = p->next;\n                g_object_unref (repo);\n                *repos = g_list_delete_link (*repos, p);\n                p = next;\n                if (repo_name)\n                    g_free (repo_name);\n                if (last_modifier)\n                    g_free (last_modifier);\n                continue;\n            } else {\n                g_object_set (repo, \"name\", commit->repo_name,\n                                    \"repo_name\", commit->repo_name,\n                                    \"last_modify\", commit->ctime,\n                                    \"last_modified\", commit->ctime,\n                                    \"version\", commit->version,\n                                    \"encrypted\", commit->encrypted,\n                                    \"last_modifier\", commit->creator_name,\n                                    NULL);\n\n                /* Set to database */\n      
          set_repo_commit_to_db (repo_id, commit->repo_name, commit->ctime, commit->version,\n                                       commit->encrypted, commit->creator_name);\n\n                seaf_commit_unref (commit);\n            }\n            g_free (repo_id);\n            g_free (commit_id);\n        }\n        if (repo_name)\n            g_free (repo_name);\n        if (last_modifier)\n            g_free (last_modifier);\n\n        p = p->next;\n    }\n}\n\nGList*\nseaf_share_manager_list_share_repos (SeafShareManager *mgr, const char *email,\n                                     const char *type, int start, int limit,\n                                     gboolean *db_err)\n{\n    GList *ret = NULL, *p;\n    char *sql;\n\n    if (start == -1 && limit == -1) {\n        if (g_strcmp0 (type, \"from_email\") == 0) {\n            sql = \"SELECT sh.repo_id, v.repo_id, \"\n                \"to_email, permission, commit_id, s.size, \"\n                \"v.origin_repo, v.path, i.name, \"\n                \"i.update_time, i.version, i.is_encrypted, i.last_modifier, i.status, i.type, \"\n                \"i2.name FROM \"\n                \"SharedRepo sh LEFT JOIN VirtualRepo v ON \"\n                \"sh.repo_id=v.repo_id \"\n                \"LEFT JOIN RepoSize s ON sh.repo_id = s.repo_id \"\n                \"LEFT JOIN RepoInfo i ON sh.repo_id = i.repo_id \"\n                \"LEFT JOIN RepoInfo i2 ON v.origin_repo = i2.repo_id, Branch b \"\n                \"WHERE from_email=? 
AND \"\n                \"sh.repo_id = b.repo_id AND \"\n                \"b.name = 'master' \"\n                \"ORDER BY i.update_time DESC, sh.repo_id\";\n        } else if (g_strcmp0 (type, \"to_email\") == 0) {\n            sql = \"SELECT sh.repo_id, v.repo_id, \"\n                \"from_email, permission, commit_id, s.size, \"\n                \"v.origin_repo, v.path, i.name, \"\n                \"i.update_time, i.version, i.is_encrypted, i.last_modifier, i.status, i.type, \"\n                \"i2.name FROM \"\n                \"SharedRepo sh LEFT JOIN VirtualRepo v ON \"\n                \"sh.repo_id=v.repo_id \"\n                \"LEFT JOIN RepoSize s ON sh.repo_id = s.repo_id \"\n                \"LEFT JOIN RepoInfo i ON sh.repo_id = i.repo_id \"\n                \"LEFT JOIN RepoInfo i2 ON v.origin_repo = i2.repo_id, Branch b \"\n                \"WHERE to_email=? AND \"\n                \"sh.repo_id = b.repo_id AND \"\n                \"b.name = 'master' \"\n                \"ORDER BY i.update_time DESC, sh.repo_id\";\n        } else {\n            /* should never reach here */\n            seaf_warning (\"[share mgr] Wrong column type\");\n            return NULL;\n        }\n\n        if (seaf_db_statement_foreach_row (mgr->seaf->db, sql,\n                                           collect_repos, &ret,\n                                           1, \"string\", email) < 0) {\n            seaf_warning (\"[share mgr] DB error when get shared repo id and email \"\n                       \"for %s.\\n\", email);\n            for (p = ret; p; p = p->next)\n                g_object_unref (p->data);\n            g_list_free (ret);\n            if (db_err)\n                *db_err = TRUE;\n            return NULL;\n        }\n    }\n    else {\n        if (g_strcmp0 (type, \"from_email\") == 0) {\n            sql = \"SELECT sh.repo_id, v.repo_id, \"\n                \"to_email, permission, commit_id, s.size, \"\n                \"v.origin_repo, v.path, i.name, 
\"\n                \"i.update_time, i.version, i.is_encrypted, i.last_modifier, i.status, i.type, \"\n                \"i2.name FROM \"\n                \"SharedRepo sh LEFT JOIN VirtualRepo v ON \"\n                \"sh.repo_id=v.repo_id \"\n                \"LEFT JOIN RepoSize s ON sh.repo_id = s.repo_id \"\n                \"LEFT JOIN RepoInfo i ON sh.repo_id = i.repo_id \"\n                \"LEFT JOIN RepoInfo i2 ON v.origin_repo = i2.repo_id, Branch b \"\n                \"WHERE from_email=? \"\n                \"AND sh.repo_id = b.repo_id \"\n                \"AND b.name = 'master' \"\n                \"ORDER BY i.update_time DESC, sh.repo_id \"\n                \"LIMIT ? OFFSET ?\";\n        } else if (g_strcmp0 (type, \"to_email\") == 0) {\n            sql = \"SELECT sh.repo_id, v.repo_id, \"\n                \"from_email, permission, commit_id, s.size, \"\n                \"v.origin_repo, v.path, i.name, \"\n                \"i.update_time, i.version, i.is_encrypted, i.last_modifier, i.status, i.type, \"\n                \"i2.name FROM \"\n                \"SharedRepo sh LEFT JOIN VirtualRepo v ON \"\n                \"sh.repo_id=v.repo_id \"\n                \"LEFT JOIN RepoSize s ON sh.repo_id = s.repo_id \"\n                \"LEFT JOIN RepoInfo i ON sh.repo_id = i.repo_id \"\n                \"LEFT JOIN RepoInfo i2 ON v.origin_repo = i2.repo_id, Branch b \"\n                \"WHERE to_email=? \"\n                \"AND sh.repo_id = b.repo_id \"\n                \"AND b.name = 'master' \"\n                \"ORDER BY i.update_time DESC, sh.repo_id \"\n                \"LIMIT ? 
OFFSET ?\";\n        } else {\n            /* should never reach here */\n            seaf_warning (\"[share mgr] Wrong column type\");\n            return NULL;\n        }\n\n        if (seaf_db_statement_foreach_row (mgr->seaf->db, sql,\n                                           collect_repos, &ret,\n                                           3, \"string\", email,\n                                           \"int\", limit, \"int\", start) < 0) {\n            seaf_warning (\"[share mgr] DB error when get shared repo id and email \"\n                       \"for %s.\\n\", email);\n            for (p = ret; p; p = p->next)\n                g_object_unref (p->data);\n            g_list_free (ret);\n            if (db_err)\n                *db_err = TRUE;\n            return NULL;\n        }\n    }\n\n    seaf_fill_repo_commit_if_not_in_db (&ret);\n\n    return g_list_reverse (ret);\n}\n\nstatic gboolean\ncollect_shared_to (SeafDBRow *row, void *data)\n{\n    GList **plist = data;\n    const char *to_email;\n\n    to_email = seaf_db_row_get_column_text (row, 0);\n    *plist = g_list_prepend (*plist, g_ascii_strdown(to_email, -1));\n\n    return TRUE;\n}\n\nGList *\nseaf_share_manager_list_shared_to (SeafShareManager *mgr,\n                                   const char *owner,\n                                   const char *repo_id)\n{\n    char *sql;\n    GList *ret = NULL;\n\n    sql = \"SELECT to_email FROM SharedRepo WHERE \"\n        \"from_email=? 
AND repo_id=?\";\n    if (seaf_db_statement_foreach_row (mgr->seaf->db, sql,\n                                       collect_shared_to, &ret,\n                                       2, \"string\", owner, \"string\", repo_id) < 0) {\n        seaf_warning (\"[share mgr] DB error when list shared to.\\n\");\n        string_list_free (ret);\n        return NULL;\n    }\n\n    return ret;\n}\n\nstatic gboolean\ncollect_repo_shared_to (SeafDBRow *row, void *data)\n{\n    GList **shared_to = data;\n    const char *to_email = seaf_db_row_get_column_text (row, 0);\n    char *email_down = g_ascii_strdown(to_email, -1);\n    const char *perm = seaf_db_row_get_column_text (row, 1);\n    const char *repo_id = seaf_db_row_get_column_text (row, 2);\n\n    SeafileSharedUser *uobj = g_object_new (SEAFILE_TYPE_SHARED_USER,\n                                            \"repo_id\", repo_id,\n                                            \"user\", email_down,\n                                            \"perm\", perm,\n                                            NULL);\n    *shared_to = g_list_prepend (*shared_to, uobj);\n    g_free (email_down);\n\n    return TRUE;\n}\n\nGList *\nseaf_share_manager_list_repo_shared_to (SeafShareManager *mgr,\n                                        const char *from_email,\n                                        const char *repo_id,\n                                        GError **error)\n{\n    GList *shared_to = NULL;\n    char *sql = \"SELECT to_email, permission, repo_id FROM SharedRepo WHERE \"\n                \"from_email=? 
AND repo_id=?\";\n\n    int ret = seaf_db_statement_foreach_row (mgr->seaf->db, sql,\n                                             collect_repo_shared_to, &shared_to,\n                                             2, \"string\", from_email, \"string\", repo_id);\n    if (ret < 0) {\n        seaf_warning (\"Failed to list repo %s shared to from db.\\n\", repo_id);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to list repo shared to from db\");\n        while (shared_to) {\n            g_object_unref (shared_to->data);\n            shared_to = g_list_delete_link (shared_to, shared_to);\n        }\n        return NULL;\n    }\n\n    return shared_to;\n}\n\nstatic gboolean\ncollect_repo_shared_group (SeafDBRow *row, void *data)\n{\n    GList **shared_group = data;\n    int group_id = seaf_db_row_get_column_int (row, 0);\n    const char *perm = seaf_db_row_get_column_text (row, 1);\n    const char *repo_id = seaf_db_row_get_column_text (row, 2);\n\n    SeafileSharedGroup *gobj = g_object_new (SEAFILE_TYPE_SHARED_GROUP,\n                                             \"repo_id\", repo_id,\n                                             \"group_id\", group_id,\n                                             \"perm\", perm,\n                                             NULL);\n    *shared_group = g_list_prepend (*shared_group, gobj);\n\n    return TRUE;\n}\n\nGList *\nseaf_share_manager_list_repo_shared_group (SeafShareManager *mgr,\n                                           const char *from_email,\n                                           const char *repo_id,\n                                           GError **error)\n{\n    GList *shared_group = NULL;\n    char *sql = \"SELECT group_id, permission, repo_id FROM RepoGroup WHERE \"\n                \"user_name=? 
AND repo_id=?\";\n\n    int ret = seaf_db_statement_foreach_row (mgr->seaf->db, sql,\n                                             collect_repo_shared_group, &shared_group,\n                                             2, \"string\", from_email, \"string\", repo_id);\n    if (ret < 0) {\n        seaf_warning (\"Failed to list repo %s shared group from db.\\n\", repo_id);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to list repo shared group from db\");\n        while (shared_group) {\n            g_object_unref (shared_group->data);\n            shared_group = g_list_delete_link (shared_group, shared_group);\n        }\n        return NULL;\n    }\n\n    return shared_group;\n}\n\nstatic gboolean\nget_shared_dirs_to_user (SeafDBRow *row, void *data)\n{\n    GHashTable *dirs = data;\n\n    const char *path = seaf_db_row_get_column_text (row, 0);\n    const char *perm = seaf_db_row_get_column_text (row, 1);\n    g_hash_table_replace (dirs, g_strdup (path), g_strdup (perm));\n\n    return TRUE;\n}\n\nstatic gboolean\nget_shared_dirs_to_group (SeafDBRow *row, void *data)\n{\n    GHashTable *dirs = data;\n\n    const char *path = seaf_db_row_get_column_text (row, 0);\n    const char *perm = seaf_db_row_get_column_text (row, 1);\n\n    char *prev_perm = g_hash_table_lookup (dirs, path);\n    if (g_strcmp0 (perm, prev_perm) != 0 &&\n        (prev_perm == NULL || g_strcmp0 (prev_perm, \"r\") == 0)) {\n        g_hash_table_replace (dirs, g_strdup (path), g_strdup (perm));\n    }\n\n    return TRUE;\n}\n\n// Conver group id list to comma separated str\n// [1, 2, 3] -> 1,2,3\nstatic GString *\nconvert_group_list_to_str (GList *groups)\n{\n    GList *iter = groups;\n    CcnetGroup *group;\n    int group_id;\n    GString *group_ids = g_string_new (\"\");\n\n    for (; iter; iter = iter->next) {\n        group = iter->data;\n        g_object_get (group, \"id\", &group_id, NULL);\n        g_string_append_printf (group_ids, \"%d,\", 
group_id);\n    }\n    group_ids = g_string_erase (group_ids, group_ids->len - 1, 1);\n\n    return group_ids;\n}\n\nGHashTable *\nseaf_share_manager_get_shared_dirs_to_user (SeafShareManager *mgr,\n                                            const char *orig_repo_id,\n                                            const char *to_email)\n{\n    GHashTable *dirs;\n    char *sql;\n\n    dirs = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, g_free);\n    sql = \"SELECT v.path, s.permission FROM SharedRepo s, VirtualRepo v WHERE \"\n          \"s.repo_id = v.repo_id AND s.to_email = ? AND v.origin_repo = ?\";\n\n    int ret = seaf_db_statement_foreach_row (mgr->seaf->db, sql, get_shared_dirs_to_user,\n                                             dirs, 2, \"string\", to_email,\n                                             \"string\", orig_repo_id);\n    if (ret < 0) {\n        seaf_warning (\"Failed to get all shared folder perms \"\n                      \"in parent repo %.8s for user %s.\\n\", orig_repo_id, to_email);\n        g_hash_table_destroy (dirs);\n        return NULL;\n    }\n\n    return dirs;\n}\n\nGHashTable *\nseaf_share_manager_get_shared_dirs_to_group (SeafShareManager *mgr,\n                                             const char *orig_repo_id,\n                                             GList *groups)\n{\n    GHashTable *dirs;\n    GString *group_ids;\n    char *sql;\n\n    dirs = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, g_free);\n    group_ids = convert_group_list_to_str (groups);\n    sql = g_strdup_printf (\"SELECT v.path, s.permission \"\n                           \"FROM RepoGroup s, VirtualRepo v WHERE \"\n                           \"s.repo_id = v.repo_id AND v.origin_repo = ? 
\"\n                           \"AND s.group_id in (%s)\", group_ids->str);\n\n    int ret = seaf_db_statement_foreach_row (mgr->seaf->db, sql, get_shared_dirs_to_group,\n                                             dirs, 1, \"string\", orig_repo_id);\n    g_free (sql);\n    g_string_free (group_ids, TRUE);\n\n    if (ret < 0) {\n        seaf_warning (\"Failed to get all shared folder perm from parent repo %.8s \"\n                      \"to all user groups.\\n\", orig_repo_id);\n        g_hash_table_destroy (dirs);\n        return NULL;\n    }\n\n    return dirs;\n}\n\nint\nseaf_share_manager_remove_share (SeafShareManager *mgr, const char *repo_id,\n                                 const char *from_email, const char *to_email)\n{\n    if (seaf_db_statement_query (mgr->seaf->db,\n                       \"DELETE FROM SharedRepo WHERE repo_id = ? AND from_email =\"\n                       \" ? AND to_email = ?\",\n                       3, \"string\", repo_id, \"string\", from_email,\n                       \"string\", to_email) < 0)\n        return -1;\n\n    return 0;\n}\n\nint\nseaf_share_manager_unshare_subdir (SeafShareManager* mgr,\n                                   const char *orig_repo_id,\n                                   const char *path,\n                                   const char *from_email,\n                                   const char *to_email)\n{\n    if (seaf_db_statement_query (mgr->seaf->db,\n                                 \"DELETE FROM SharedRepo WHERE \"\n                                 \"from_email = ? AND to_email = ? \"\n                                 \"AND repo_id IN \"\n                                 \"(SELECT repo_id FROM VirtualRepo WHERE \"\n                                 \"origin_repo = ? 
AND path = ?)\",\n                                 4, \"string\", from_email,\n                                 \"string\", to_email,\n                                 \"string\", orig_repo_id,\n                                 \"string\", path) < 0)\n        return -1;\n\n    return 0;\n}\n\nint\nseaf_share_manager_remove_repo (SeafShareManager *mgr, const char *repo_id)\n{\n    if (seaf_db_statement_query (mgr->seaf->db,\n                       \"DELETE FROM SharedRepo WHERE repo_id = ?\",\n                       1, \"string\", repo_id) < 0)\n        return -1;\n\n    return 0;\n}\n\nchar *\nseaf_share_manager_check_permission (SeafShareManager *mgr,\n                                     const char *repo_id,\n                                     const char *email)\n{\n    char *sql;\n\n    sql = \"SELECT permission FROM SharedRepo WHERE repo_id=? AND to_email=?\";\n    return seaf_db_statement_get_string (mgr->seaf->db, sql,\n                                         2, \"string\", repo_id, \"string\", email);\n}\n\nstatic gboolean\nget_shared_sub_dirs (SeafDBRow *row, void *data)\n{\n    GHashTable *sub_dirs = data;\n    int dummy;\n\n    const char *sub_dir = seaf_db_row_get_column_text (row, 0);\n    g_hash_table_replace (sub_dirs, g_strdup(sub_dir), &dummy);\n\n    return TRUE;\n}\n\nGHashTable *\nseaf_share_manager_get_shared_sub_dirs (SeafShareManager *mgr,\n                                        const char *repo_id,\n                                        const char *path)\n{\n    GHashTable *sub_dirs = g_hash_table_new_full (g_str_hash, g_str_equal,\n                                                  g_free, NULL);\n    char *pattern;\n    if (strcmp (path, \"/\") == 0) {\n        pattern = g_strdup_printf(\"%s%%\", path);\n    } else {\n        pattern = g_strdup_printf (\"%s/%%\", path);\n    }\n    int ret = seaf_db_statement_foreach_row (mgr->seaf->db,\n                                             \"SELECT v.path FROM VirtualRepo v, SharedRepo s \"\n  
                                           \"WHERE v.repo_id = s.repo_id and \"\n                                             \"v.origin_repo = ? AND v.path LIKE ?\",\n                                             get_shared_sub_dirs, sub_dirs,\n                                             2, \"string\", repo_id, \"string\", pattern);\n\n    if (ret < 0) {\n        g_free (pattern);\n        seaf_warning (\"Failed to get shared sub dirs from db.\\n\");\n        g_hash_table_destroy (sub_dirs);\n        return NULL;\n    }\n\n    ret = seaf_db_statement_foreach_row (mgr->seaf->db,\n                                         \"SELECT v.path FROM VirtualRepo v, RepoGroup r \"\n                                         \"WHERE v.repo_id = r.repo_id and \"\n                                         \"v.origin_repo = ? AND v.path LIKE ?\",\n                                         get_shared_sub_dirs, sub_dirs,\n                                         2, \"string\", repo_id, \"string\", pattern);\n    g_free (pattern);\n\n    if (ret < 0) {\n        seaf_warning (\"Failed to get shared sub dirs from db.\\n\");\n        g_hash_table_destroy (sub_dirs);\n        return NULL;\n    }\n\n    return sub_dirs;\n}\n\nint\nseaf_share_manager_is_repo_shared (SeafShareManager *mgr,\n                                   const char *repo_id)\n{\n    gboolean ret;\n    gboolean db_err = FALSE;\n\n    ret = seaf_db_statement_exists (mgr->seaf->db,\n                                    \"SELECT repo_id FROM SharedRepo WHERE \"\n                                    \"repo_id = ?\", &db_err,\n                                    1, \"string\", repo_id);\n    if (db_err) {\n        seaf_warning (\"DB error when check repo exist in SharedRepo.\\n\");\n        return -1;\n    }\n\n    if (!ret) {\n        ret = seaf_db_statement_exists (mgr->seaf->db,\n                                        \"SELECT repo_id FROM RepoGroup WHERE \"\n                                        \"repo_id = ?\", &db_err,\n  
                                      1, \"string\", repo_id);\n        if (db_err) {\n            seaf_warning (\"DB error when check repo exist in RepoGroup.\\n\");\n            return -1;\n        }\n    }\n\n    return ret;\n}\n\nGObject *\nseaf_get_shared_repo_by_path (SeafRepoManager *mgr,\n                              const char *repo_id,\n                              const char *path,\n                              const char *shared_to,\n                              int is_org,\n                              GError **error)\n{\n    char *sql;\n    char *real_repo_id = NULL;\n    GList *repo = NULL;\n    GObject *ret = NULL;\n\n    /* If path is NULL, 'repo_id' represents for the repo we want,\n     * otherwise, 'repo_id' represents for the origin repo,\n     * find virtual repo by path first.\n     */\n    if (path != NULL) {\n        real_repo_id = seaf_repo_manager_get_virtual_repo_id (mgr, repo_id, path, NULL);\n        if (!real_repo_id) {\n            seaf_warning (\"Failed to get virtual repo_id by path %s, origin_repo: %s\\n\", path, repo_id);\n            return NULL;\n        }\n    }\n    if (!real_repo_id)\n        real_repo_id = g_strdup (repo_id);\n\n    if (!is_org)\n        sql = \"SELECT sh.repo_id, v.repo_id, \"\n              \"from_email, permission, commit_id, s.size, \"\n              \"v.origin_repo, v.path, i.name, \"\n              \"i.update_time, i.version, i.is_encrypted, i.last_modifier, i.status, i.type, \"\n              \"i2.name FROM \"\n              \"SharedRepo sh LEFT JOIN VirtualRepo v ON \"\n              \"sh.repo_id=v.repo_id \"\n              \"LEFT JOIN RepoSize s ON sh.repo_id = s.repo_id \"\n              \"LEFT JOIN RepoInfo i ON sh.repo_id = i.repo_id \"\n              \"LEFT JOIN RepoInfo i2 ON v.origin_repo = i2.repo_id, Branch b \"\n              \"WHERE to_email=? AND \"\n              \"sh.repo_id = b.repo_id AND sh.repo_id=? 
AND \"\n              \"b.name = 'master' \";\n    else\n        sql = \"SELECT sh.repo_id, v.repo_id, \"\n              \"from_email, permission, commit_id, s.size, \"\n              \"v.origin_repo, v.path, i.name, \"\n              \"i.update_time, i.version, i.is_encrypted, i.last_modifier, i.status, i.type, \"\n              \"i2.name FROM \"\n              \"OrgSharedRepo sh LEFT JOIN VirtualRepo v ON \"\n              \"sh.repo_id=v.repo_id \"\n              \"LEFT JOIN RepoSize s ON sh.repo_id = s.repo_id \"\n              \"LEFT JOIN RepoInfo i ON sh.repo_id = i.repo_id \"\n              \"LEFT JOIN RepoInfo i2 ON v.origin_repo = i2.repo_id, Branch b \"\n              \"WHERE to_email=? AND \"\n              \"sh.repo_id = b.repo_id AND sh.repo_id=? AND \"\n              \"b.name = 'master' \";\n\n    /* The list 'repo' should have only one repo,\n     * use existing api collect_repos() to get it.\n     */\n    if (seaf_db_statement_foreach_row (mgr->seaf->db, sql,\n                                       collect_repos, &repo,\n                                       2, \"string\", shared_to, \"string\", real_repo_id) < 0) {\n            g_free (real_repo_id);\n            g_list_free (repo);\n            seaf_warning (\"[share mgr] DB error when get shared repo \"\n                          \"for %s, path:%s\\n\", shared_to, path);\n            return NULL;\n    }\n    g_free (real_repo_id);\n    if (repo) {\n        ret = (GObject *)(repo->data);\n        g_list_free (repo);\n    }\n\n    return ret;\n}\n\nint\nseaf_share_manager_unshare_group_subdir (SeafShareManager* mgr,\n                                         const char *repo_id,\n                                         const char *path,\n                                         const char *owner,\n                                         int group_id)\n{\n    if (seaf_db_statement_query (mgr->seaf->db,\n                                 \"DELETE FROM RepoGroup WHERE \"\n                              
   \"user_name = ? AND group_id = ? \"\n                                 \"AND repo_id IN \"\n                                 \"(SELECT repo_id FROM VirtualRepo WHERE \"\n                                 \"origin_repo = ? AND path = ?)\",\n                                 4, \"string\", owner,\n                                 \"int\", group_id,\n                                 \"string\", repo_id,\n                                 \"string\", path) < 0)\n        return -1;\n\n    return 0;\n}\n\ngboolean\nseaf_share_manager_repo_has_been_shared (SeafShareManager* mgr,\n                                         const char *repo_id,\n                                         gboolean including_groups)\n{\n    gboolean exists;\n    gboolean db_err = FALSE;\n    char *sql;\n\n    sql = \"SELECT 1 FROM SharedRepo WHERE repo_id=?\";\n    exists = seaf_db_statement_exists (mgr->seaf->db, sql, &db_err,\n                                       1, \"string\", repo_id);\n    if (db_err) {\n        seaf_warning (\"DB error when check repo exist in SharedRepo and RepoGroup.\\n\");\n        return FALSE;\n    }\n\n    if (!exists && including_groups) {\n        sql = \"SELECT 1 FROM RepoGroup WHERE repo_id=?\";\n        exists = seaf_db_statement_exists (mgr->seaf->db, sql, &db_err,\n                                           1, \"string\", repo_id);\n    }\n\n    return exists;\n}\n\ngboolean\nget_shared_users_cb (SeafDBRow *row, void *data)\n{\n    GList **users = data;\n    const char *repo_id = seaf_db_row_get_column_text (row, 0);\n    const char *user = seaf_db_row_get_column_text (row, 1);\n    const char *perm = seaf_db_row_get_column_text (row, 2);\n    SeafileSharedUser *uobj = g_object_new (SEAFILE_TYPE_SHARED_USER,\n                                            \"repo_id\", repo_id,\n                                            \"user\", user,\n                                            \"perm\", perm,\n                                            NULL);\n    *users = 
g_list_append (*users, uobj);\n\n    return TRUE;\n}\n\nGList *\nseaf_share_manager_org_get_shared_users_by_repo (SeafShareManager* mgr,\n                                                 int org_id,\n                                                 const char *repo_id)\n{\n    GList *users = NULL;\n    char *sql = \"SELECT repo_id, to_email, permission FROM OrgSharedRepo WHERE org_id=? AND \"\n                \"repo_id=?\";\n\n    int ret = seaf_db_statement_foreach_row (mgr->seaf->db, sql,\n                                             get_shared_users_cb, &users,\n                                             2, \"int\", org_id, \"string\", repo_id);\n    if (ret < 0) {\n        seaf_warning(\"Failed to get users by repo_id[%s], org_id[%d]\\n\",\n                     repo_id, org_id);\n        return NULL;\n    }\n\n    return users;\n}\n\n\nGList *\nseaf_share_manager_get_shared_users_by_repo(SeafShareManager* mgr,\n                                            const char *repo_id)\n{\n    GList *users = NULL;\n    char *sql = \"SELECT repo_id, to_email, permission FROM SharedRepo WHERE \"\n                \"repo_id=?\";\n\n    int ret = seaf_db_statement_foreach_row (mgr->seaf->db, sql,\n                                             get_shared_users_cb, &users,\n                                             1, \"string\", repo_id);\n    if (ret < 0) {\n        seaf_warning(\"Failed to get users by repo_id[%s]\\n\", repo_id);\n        return NULL;\n    }\n\n    return users;\n}\n"
  },
  {
    "path": "server/share-mgr.h",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#ifndef SHARE_MGR_H\n#define SHARE_MGR_H\n\n#include <glib.h>\n\nstruct _SeafileSession;\n\ntypedef struct _SeafShareManager SeafShareManager;\ntypedef struct _SeafShareManagerPriv SeafShareManagerPriv;\ntypedef struct _ShareRepoInfo ShareRepoInfo;\n\nstruct _SeafShareManager {\n    struct _SeafileSession *seaf;\n\n};\n\nSeafShareManager*\nseaf_share_manager_new (struct _SeafileSession *seaf);\n\nint\nseaf_share_manager_start (SeafShareManager *mgr);\n\nint\nseaf_share_manager_add_share (SeafShareManager *mgr, const char *repo_id,\n                              const char *from_email, const char *to_email,\n                              const char *permission);\n\nint\nseaf_share_manager_set_subdir_perm_by_path (SeafShareManager *mgr, const char *repo_id,\n                                            const char *from_email, const char *to_email,\n                                            const char *permission, const char *path);\n\nint\nseaf_share_manager_set_permission (SeafShareManager *mgr, const char *repo_id,\n                                   const char *from_email, const char *to_email,\n                                   const char *permission);\n\nGList*\nseaf_share_manager_list_share_repos (SeafShareManager *mgr, const char *email,\n                                     const char *type, int start, int limit,\n                                     gboolean *db_err);\n\nGList *\nseaf_share_manager_list_shared_to (SeafShareManager *mgr,\n                                   const char *owner,\n                                   const char *repo_id);\n\nGList *\nseaf_share_manager_list_repo_shared_to (SeafShareManager *mgr,\n                                        const char *owner,\n                                        const char *repo_id,\n                                        GError **error);\n\nGList *\nseaf_share_manager_list_repo_shared_group 
(SeafShareManager *mgr,\n                                           const char *from_email,\n                                           const char *repo_id,\n                                           GError **error);\n\nGHashTable *\nseaf_share_manager_get_shared_dirs_to_user (SeafShareManager *mgr,\n                                            const char *orig_repo_id,\n                                            const char *to_email);\n\nGHashTable *\nseaf_share_manager_get_shared_dirs_to_group (SeafShareManager *mgr,\n                                             const char *orig_repo_id,\n                                             GList *groups);\n\nint\nseaf_share_manager_remove_share (SeafShareManager *mgr, const char *repo_id,\n                                 const char *from_email, const char *to_email);\n\nint\nseaf_share_manager_unshare_subdir (SeafShareManager* mgr,\n                                   const char *orig_repo_id,\n                                   const char *path,\n                                   const char *from_email,\n                                   const char *to_email);\n\n\n/* Remove all share info of a repo. 
*/\nint\nseaf_share_manager_remove_repo (SeafShareManager *mgr, const char *repo_id);\n\nchar *\nseaf_share_manager_check_permission (SeafShareManager *mgr,\n                                     const char *repo_id,\n                                     const char *email);\n\nGHashTable *\nseaf_share_manager_get_shared_sub_dirs (SeafShareManager *mgr,\n                                        const char *repo_id,\n                                        const char *path);\n\nint\nseaf_share_manager_is_repo_shared (SeafShareManager *mgr,\n                                   const char *repo_id);\n\nGObject *\nseaf_get_shared_repo_by_path (SeafRepoManager *mgr,\n                              const char *repo_id,\n                              const char *path,\n                              const char *shared_to,\n                              int is_org,\n                              GError **error);\nint\nseaf_share_manager_unshare_group_subdir (SeafShareManager* mgr,\n                                         const char *repo_id,\n                                         const char *path,\n                                         const char *owner,\n                                         int group_id);\n\ngboolean\nseaf_share_manager_repo_has_been_shared (SeafShareManager* mgr,\n                                         const char *repo_id,\n                                         gboolean including_groups);\n\nGList *\nseaf_share_manager_org_get_shared_users_by_repo (SeafShareManager* mgr,\n                                                 int org_id,\n                                                 const char *repo_id);\n\nGList *\nseaf_share_manager_get_shared_users_by_repo (SeafShareManager* mgr,\n                                             const char *repo_id);\n#endif /* SHARE_MGR_H */\n\n"
  },
  {
    "path": "server/size-sched.c",
    "content": "#include \"common.h\"\n\n#include <pthread.h>\n\n#include \"seafile-session.h\"\n#include \"size-sched.h\"\n#include \"diff-simple.h\"\n#define DEBUG_FLAG SEAFILE_DEBUG_OTHER\n#include \"log.h\"\n#include \"obj-cache.h\"\n\n#define REPO_SIZE_LIST \"repo_size_task\"\n\ntypedef struct SizeSchedulerPriv {\n    pthread_t thread_id;\n    GThreadPool *compute_repo_size_thread_pool;\n    struct ObjCache *cache;\n} SizeSchedulerPriv;\n\ntypedef struct RepoSizeJob {\n    SizeScheduler *sched;\n    char repo_id[37];\n} RepoSizeJob;\n\ntypedef struct RepoInfo {\n    gchar *head_id;\n    gint64 size;\n    gint64 file_count;\n} RepoInfo;\n\nstatic void*\ncompute_repo_size (void *vjob);\nstatic void\ncompute_task (void *data, void *user_data);\nstatic void*\nlog_unprocessed_task_thread (void *arg);\n\n#define DEFAULT_SCHEDULE_THREAD_NUMBER 1;\n\nSizeScheduler *\nsize_scheduler_new (SeafileSession *session)\n{\n    GError *error = NULL;\n    SizeScheduler *sched = g_new0 (SizeScheduler, 1);\n    int sched_thread_num;\n\n    if (!sched)\n        return NULL;\n\n    sched->priv = g_new0 (SizeSchedulerPriv, 1);\n    if (!sched->priv) {\n        g_free (sched);\n        return NULL;\n    }\n\n    sched->priv->cache = session->obj_cache;\n\n    sched->seaf = session;\n\n    sched_thread_num = g_key_file_get_integer (session->config, \"scheduler\", \"size_sched_thread_num\", NULL);\n\n    if (sched_thread_num == 0)\n        sched_thread_num = DEFAULT_SCHEDULE_THREAD_NUMBER;\n\n    sched->priv->compute_repo_size_thread_pool = g_thread_pool_new (compute_task, NULL,\n                                                                    sched_thread_num, FALSE, &error);\n    if (!sched->priv->compute_repo_size_thread_pool) {\n        if (error) {\n            seaf_warning (\"Failed to create compute repo size thread pool: %s.\\n\", error->message);\n        } else {\n            seaf_warning (\"Failed to create repo size thread pool.\\n\");\n        }\n\n        g_clear_error 
(&error);\n        g_free (sched->priv);\n        g_free (sched);\n        return NULL;\n    }\n\n    return sched;\n}\n\nint\nsize_scheduler_start (SizeScheduler *scheduler)\n{\n    int ret = pthread_create (&scheduler->priv->thread_id, NULL, log_unprocessed_task_thread, scheduler);\n    if (ret < 0) {\n        seaf_warning (\"Failed to create log unprocessed task thread.\\n\");\n        return -1;\n    }\n    pthread_detach (scheduler->priv->thread_id);\n\n    return 0;\n}\n\nvoid\nschedule_repo_size_computation (SizeScheduler *scheduler, const char *repo_id)\n{\n    RepoSizeJob *job = g_new0(RepoSizeJob, 1);\n\n    job->sched = scheduler;\n    memcpy (job->repo_id, repo_id, 37);\n\n    g_thread_pool_push (scheduler->priv->compute_repo_size_thread_pool, job, NULL);\n}\n\n#define PRINT_UNPROCESSED_TASKS_INTERVAL 30\n\nvoid *log_unprocessed_task_thread (void *arg)\n{\n    SizeScheduler *sched = arg;\n    guint unprocessed_num;\n\n    while (1) {\n        unprocessed_num = g_thread_pool_unprocessed (sched->priv->compute_repo_size_thread_pool);\n\n        if (unprocessed_num > 10)\n            seaf_message (\"The number of repo size update tasks in queue is %u\\n\",\n                          unprocessed_num);\n\n        sleep (PRINT_UNPROCESSED_TASKS_INTERVAL);\n    }\n\n    return NULL;\n}\n\nstatic void\ncompute_task (void *data, void *user_data)\n{\n    RepoSizeJob *job = data;\n\n    compute_repo_size (job);\n\n    g_free (job);\n}\n\nstatic gboolean get_head_id (SeafDBRow *row, void *data)\n{\n    char *head_id_out = data;\n    const char *head_id;\n\n    head_id = seaf_db_row_get_column_text (row, 0);\n    memcpy (head_id_out, head_id, 40);\n\n    return FALSE;\n}\n\nstatic int\nset_repo_size_and_file_count (SeafDB *db,\n                              const char *repo_id,\n                              const char *new_head_id,\n                              gint64 size,\n                              gint64 file_count)\n{\n    SeafDBTrans *trans;\n    char 
*sql;\n    char cached_head_id[41] = {0};\n    int ret = 0;\n\n    trans = seaf_db_begin_transaction (db);\n    if (!trans)\n        return -1;\n\n    sql = \"SELECT head_id FROM RepoSize WHERE repo_id=?\";\n\n    int n = seaf_db_trans_foreach_selected_row (trans, sql,\n                                                get_head_id,\n                                                cached_head_id,\n                                                1, \"string\", repo_id);\n    if (n < 0) {\n        ret = -1;\n        goto rollback;\n    }\n\n    if (n == 0) {\n        /* Size not set before. */\n        sql = \"INSERT INTO RepoSize (repo_id, size, head_id) VALUES (?, ?, ?)\";\n        if (seaf_db_trans_query (trans, sql, 3, \"string\", repo_id, \"int64\", size,\n                                 \"string\", new_head_id) < 0) {\n            ret = -1;\n            goto rollback;\n        }\n    } else {\n        sql = \"UPDATE RepoSize SET size = ?, head_id = ? WHERE repo_id = ?\";\n        if (seaf_db_trans_query (trans, sql, 3, \"int64\", size, \"string\", new_head_id,\n                                 \"string\", repo_id) < 0) {\n            ret = -1;\n            goto rollback;\n        }\n    }\n\n    gboolean exist;\n    gboolean db_err;\n\n    exist = seaf_db_trans_check_for_existence (trans,\n                                               \"SELECT 1 FROM RepoFileCount WHERE repo_id=?\",\n                                               &db_err, 1, \"string\", repo_id);\n    if (db_err) {\n        ret = -1;\n        goto rollback;\n    }\n\n    if (exist) {\n        if (seaf_db_trans_query (trans,\n                                 \"UPDATE RepoFileCount SET file_count=? 
WHERE repo_id=?\",\n                                 2, \"int64\", file_count, \"string\", repo_id) < 0) {\n            ret = -1;\n            goto rollback;\n        }\n    } else {\n        if (seaf_db_trans_query (trans,\n                                 \"INSERT INTO RepoFileCount (repo_id,file_count) VALUES (?,?)\",\n                                 2, \"string\", repo_id, \"int64\", file_count) < 0) {\n            ret = -1;\n            goto rollback;\n        }\n    }\n\n    if (seaf_db_commit (trans) < 0) {\n        ret = -1;\n        goto rollback;\n    }\n\n    seaf_db_trans_close (trans);\n\n    return ret;\n\nrollback:\n    seaf_db_rollback (trans);\n    seaf_db_trans_close (trans);\n    return ret;\n}\n\nstatic gboolean\ncreate_old_repo_info (SeafDBRow *row, void *data)\n{\n    RepoInfo **info = data;\n\n    const char *head_id = seaf_db_row_get_column_text (row, 0);\n    gint64 size = seaf_db_row_get_column_int64 (row, 1);\n    gint64 file_count = seaf_db_row_get_column_int64 (row, 2);\n\n    if (!head_id)\n        return FALSE;\n    \n    *info = g_new0(RepoInfo, 1);\n    if (!*info)\n        return FALSE;\n    (*info)->head_id = g_strdup(head_id);\n    (*info)->size = size;\n    (*info)->file_count = file_count;\n\n    return TRUE;\n}\n\nstatic RepoInfo*\nget_old_repo_info_from_db (SeafDB *db, const char *repo_id, gboolean *is_db_err)\n{\n    RepoInfo *info = NULL;\n    char *sql;\n\n    switch (seaf_db_type (db)) {\n    case SEAF_DB_TYPE_MYSQL:\n    case SEAF_DB_TYPE_PGSQL:\n        sql = \"select s.head_id,s.size,f.file_count FROM \"\n            \"RepoSize s LEFT JOIN RepoFileCount f ON \"\n            \"s.repo_id=f.repo_id WHERE \"\n            \"s.repo_id=? 
FOR UPDATE\";\n        break;\n    case SEAF_DB_TYPE_SQLITE:\n        sql = \"select s.head_id,s.size,f.file_count FROM \"\n            \"RepoSize s LEFT JOIN RepoFileCount f ON \"\n            \"s.repo_id=f.repo_id WHERE \"\n            \"s.repo_id=?\";\n        break;\n    default:\n        seaf_warning(\"Unexpected database type.\\n\");\n        *is_db_err = TRUE;\n        return NULL;\n    }\n    int ret = seaf_db_statement_foreach_row (db, sql,\n                                             create_old_repo_info, &info,\n                                             1, \"string\", repo_id);\n    if (ret < 0)\n        *is_db_err = TRUE;\n\n    return info;\n\n}\n\nstatic void\nnotify_repo_size_change (SizeScheduler *sched, const char *repo_id)\n{\n    ObjCache *cache =  sched->priv->cache;\n    if (!cache) {\n        return;\n    }\n\n    json_t *obj = NULL;\n    char *msg = NULL;\n\n    obj = json_object ();\n\n    json_object_set_new (obj, \"repo_id\", json_string(repo_id));\n\n    msg = json_dumps (obj, JSON_COMPACT);\n\n    objcache_push (cache, REPO_SIZE_LIST, msg);\n\nout:\n    g_free (msg);\n    json_decref (obj);\n}\n\nstatic void*\ncompute_repo_size (void *vjob)\n{\n    RepoSizeJob *job = vjob;\n    SizeScheduler *sched = job->sched;\n    SeafRepo *repo = NULL;\n    SeafCommit *head = NULL;\n    SeafCommit *old_head = NULL;\n    GObject *file_count_info = NULL;\n    gint64 size = 0;\n    gint64 file_count = 0;\n    int ret;\n    RepoInfo *info = NULL;\n    GError *error = NULL;\n    gboolean is_db_err = FALSE;\n\n    repo = seaf_repo_manager_get_repo (sched->seaf->repo_mgr, job->repo_id);\n    if (!repo) {\n        seaf_warning (\"[scheduler] failed to get repo %s.\\n\", job->repo_id);\n        return vjob;\n    }\n\n    info = get_old_repo_info_from_db(sched->seaf->db, job->repo_id, &is_db_err);\n    if (is_db_err)\n        goto out;\n    if (info && g_strcmp0 (info->head_id, repo->head->commit_id) == 0)\n        goto out;\n\n    head = 
seaf_commit_manager_get_commit (sched->seaf->commit_mgr,\n                                           repo->id, repo->version,\n                                           repo->head->commit_id);\n    if (!head) {\n        seaf_warning (\"[scheduler] failed to get head commit %s.\\n\",\n                   repo->head->commit_id);\n        goto out;\n    }\n\n    if (info)\n        old_head = seaf_commit_manager_get_commit (sched->seaf->commit_mgr,\n                                                   repo->id, repo->version,\n                                                   info->head_id);\n\n    if (info && (info->file_count != 0) && old_head){\n        gint64 change_size = 0;\n        gint64 change_file_count = 0;\n        GList *diff_entries = NULL;\n        \n        ret = diff_commits (old_head, head, &diff_entries, FALSE);\n        if (ret < 0) {\n            seaf_warning(\"[scheduler] failed to do diff.\\n\");\n            goto out;\n        }\n        GList *des = NULL;\n        for (des = diff_entries; des ; des = des->next){\n            DiffEntry *diff_entry = des->data;\n            if (diff_entry->status == DIFF_STATUS_DELETED){            \n                change_size -= diff_entry->size;\n                --change_file_count;\n            }\n            else if (diff_entry->status == DIFF_STATUS_ADDED){\n                change_size += diff_entry->size;\n                ++change_file_count;\n            }\n            else if (diff_entry->status == DIFF_STATUS_MODIFIED)\n                change_size = change_size + diff_entry->size - diff_entry->origin_size;\n        }\n        size = info->size + change_size;\n        file_count = info->file_count + change_file_count;\n\n        g_list_free_full (diff_entries, (GDestroyNotify)diff_entry_free);\n    } else {\n        file_count_info = seaf_fs_manager_get_file_count_info_by_path (seaf->fs_mgr,\n                                                                       repo->store_id,\n                           
                                            repo->version,\n                                                                       repo->root_id,\n                                                                       \"/\", &error);\n        if (!file_count_info) {\n            seaf_warning (\"[scheduler] failed to get file count info.\\n\");\n            g_clear_error (&error);\n            goto out;\n        }\n        g_object_get (file_count_info, \"file_count\", &file_count, \"size\", &size, NULL);\n        g_object_unref (file_count_info);\n    }\n\n    ret = set_repo_size_and_file_count (sched->seaf->db,\n                                        job->repo_id,\n                                        repo->head->commit_id,\n                                        size,\n                                        file_count);\n    \n    if (ret < 0) {\n        seaf_warning (\"[scheduler] failed to store repo size and file count %s.\\n\", job->repo_id);\n        goto out;\n    }\n\n    notify_repo_size_change (sched, repo->store_id);\n\nout:\n    seaf_repo_unref (repo);\n    seaf_commit_unref (head);\n    seaf_commit_unref (old_head);\n    if (info)\n        g_free (info->head_id);\n    g_free (info);\n\n    return vjob;\n}\n\n"
  },
  {
    "path": "server/size-sched.h",
    "content": "#ifndef SIZE_SCHEDULER_H\n#define SIZE_SCHEDULER_H\n\nstruct _SeafileSession;\n\nstruct SizeSchedulerPriv;\n\ntypedef struct SizeScheduler {\n    struct _SeafileSession *seaf;\n\n    struct SizeSchedulerPriv *priv;\n} SizeScheduler;\n\nSizeScheduler *\nsize_scheduler_new (struct _SeafileSession *session);\n\nint\nsize_scheduler_start (SizeScheduler *scheduler);\n\nvoid\nschedule_repo_size_computation (SizeScheduler *scheduler, const char *repo_id);\n\n#endif\n"
  },
  {
    "path": "server/upload-file.c",
    "content": "#include \"common.h\"\n\n#ifdef HAVE_EVHTP\n#define DEBUG_FLAG SEAFILE_DEBUG_HTTP\n#include \"log.h\"\n\n#include <getopt.h>\n#include <fcntl.h>\n\n#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)\n#include <event2/event.h>\n#else\n#include <event.h>\n#endif\n\n#include <evhtp.h>\n\n#include <jansson.h>\n\n#include <pthread.h>\n\n#include \"seafile-object.h\"\n\n#include \"utils.h\"\n\n#include \"seafile-session.h\"\n#include \"upload-file.h\"\n#include \"http-status-codes.h\"\n#include \"http-server.h\"\n\n#include \"seafile-error.h\"\n\nenum RecvState {\n    RECV_INIT,\n    RECV_HEADERS,\n    RECV_CONTENT,\n    RECV_ERROR,\n};\n\nenum UploadError {\n    ERROR_FILENAME,\n    ERROR_EXISTS,\n    ERROR_NOT_EXIST,\n    ERROR_SIZE,\n    ERROR_QUOTA,\n    ERROR_FORBIDDEN,\n    ERROR_RECV,\n    ERROR_BLOCK_MISSING,\n    ERROR_INTERNAL,\n};\n\ntypedef struct Progress {\n    gint64 uploaded;\n    gint64 size;\n} Progress;\n\ntypedef struct RecvFSM {\n    int state;\n\n    char *repo_id;\n    char *user;\n    char *boundary;        /* boundary of multipart form-data. */\n    char *input_name;      /* input name of the current form field. */\n    char *parent_dir;\n    evbuf_t *line;          /* buffer for a line */\n\n    GHashTable *form_kvs;       /* key/value of form fields */\n    GList *filenames;           /* uploaded file names */\n    GList *files;               /* paths for completely uploaded tmp files. */\n\n    gboolean recved_crlf; /* Did we recv a CRLF when write out the last line? */\n    char *file_name;\n    char *tmp_file; /* tmp file path for the currently uploading file */\n    int fd;\n    char *resumable_tmp_file;        /* resumable upload tmp file path. In resumable uploads, contents of the chunks are appended to this tmp file. */\n\n    /* For upload progress. 
*/\n    char *progress_id;\n    Progress *progress;\n\n    char *token_type; /* For sending statistic type */\n\n    gboolean need_idx_progress;\n\n    gint64 rstart;\n    gint64 rend;\n    gint64 fsize;\n} RecvFSM;\n\n#define MAX_CONTENT_LINE 10240\n\nstatic GHashTable *upload_progress;\nstatic pthread_mutex_t pg_lock;\nstatic int\nwrite_block_data_to_tmp_file (RecvFSM *fsm, const char *parent_dir,\n                              const char *file_name);\n\n/* IE8 will set filename to the full path of the uploaded file.\n * So we need to strip out the basename from it.\n */\nstatic char *\nget_basename (const char *path)\n{\n    int i = strlen(path) - 1;\n\n    while (i >= 0) {\n        if (path[i] == '/' || path[i] == '\\\\')\n            break;\n        --i;\n    }\n\n    if (i < 0)\n        return g_strdup(path);\n\n    return g_strdup(&path[i+1]);\n}\n\n/* It's a bug of libevhtp that it doesn't set Content-Length automatically\n * in response to a multipart request.\n * Just add it in our code.\n */\nstatic void\nset_content_length_header (evhtp_request_t *req)\n{\n    char lstr[128];\n\n#ifdef WIN32\n    snprintf(lstr, sizeof(lstr), \"%lu\", (unsigned long)(evbuffer_get_length(req->buffer_out)));\n#else\n    snprintf(lstr, sizeof(lstr), \"%zu\", evbuffer_get_length(req->buffer_out));\n#endif\n\n    evhtp_headers_add_header(req->headers_out,\n                             evhtp_header_new(\"Content-Length\", lstr, 1, 1));\n}\n\nstatic gint64\nget_content_length (evhtp_request_t *req)\n{\n    const char *content_len_str = evhtp_kv_find (req->headers_in, \"Content-Length\");\n    if (!content_len_str) {\n        return -1;\n    }\n\n    return strtoll (content_len_str, NULL, 10);\n}\n\nstatic void\nsend_error_reply (evhtp_request_t *req, evhtp_res code, char *error)\n{\n    if (error)\n        evbuffer_add_printf (req->buffer_out, \"{\\\"error\\\": \\\"%s\\\"}\", error);\n    set_content_length_header (req);\n    evhtp_headers_add_header (\n        
req->headers_out,\n        evhtp_header_new(\"Content-Type\", \"application/json; charset=utf-8\", 1, 1));\n    evhtp_send_reply (req, code);\n}\n\nstatic void\nsend_success_reply (evhtp_request_t *req)\n{\n    set_content_length_header (req);\n    evhtp_headers_add_header (\n        req->headers_out,\n        evhtp_header_new(\"Content-Type\", \"application/json; charset=utf-8\", 1, 1));\n    evhtp_send_reply (req, EVHTP_RES_OK);\n}\n\nstatic void\nsend_success_reply_ie8_compatible (evhtp_request_t *req, evhtp_res code)\n{\n    set_content_length_header (req);\n\n    const char *accept = evhtp_kv_find (req->headers_in, \"Accept\");\n    if (accept && strstr (accept, \"application/json\") != NULL) {\n        evhtp_headers_add_header (\n            req->headers_out,\n            evhtp_header_new(\"Content-Type\", \"application/json; charset=utf-8\", 1, 1));\n    } else {\n        evhtp_headers_add_header (\n            req->headers_out,\n            evhtp_header_new(\"Content-Type\", \"text/plain\", 1, 1));\n    }\n    evhtp_send_reply (req, code);\n}\n\nstatic void\nsend_reply_by_error_code (evhtp_request_t *req, int error_code)\n{\n    switch (error_code) {\n    case ERROR_FILENAME:\n        send_error_reply (req, SEAF_HTTP_RES_BADFILENAME, \"Invalid filename.\\n\");\n        break;\n    case ERROR_EXISTS:\n        send_error_reply (req, SEAF_HTTP_RES_EXISTS, \"File already exists.\\n\");\n        break;\n    case ERROR_NOT_EXIST:\n        send_error_reply (req, SEAF_HTTP_RES_NOT_EXISTS, \"File does not exist.\\n\");\n        break;\n    case ERROR_SIZE:\n        send_error_reply (req, SEAF_HTTP_RES_TOOLARGE, \"File size is too large.\\n\");\n        break;\n    case ERROR_QUOTA:\n        send_error_reply (req, SEAF_HTTP_RES_NOQUOTA, \"Out of quota.\\n\");\n        break;\n    case ERROR_BLOCK_MISSING:\n        send_error_reply (req, SEAF_HTTP_RES_BLOCK_MISSING, \"Block missing.\\n\");\n        break;\n    case ERROR_FORBIDDEN:\n        send_error_reply (req, 
SEAF_HTTP_RES_FORBIDDEN, \"Permission denied.\");\n        break;\n    case ERROR_RECV:\n    case ERROR_INTERNAL:\n        send_error_reply (req, EVHTP_RES_SERVERR, \"Internal error\\n\");\n        break;\n    }\n}\n\nstatic gboolean\ncheck_tmp_file_list (GList *tmp_files, int *error_code)\n{\n    GList *ptr;\n    char *tmp_file;\n    SeafStat st;\n    gint64 total_size = 0;\n\n    for (ptr = tmp_files; ptr; ptr = ptr->next) {\n        tmp_file = ptr->data;\n\n        if (seaf_stat (tmp_file, &st) < 0) {\n            seaf_warning (\"[upload] Failed to stat temp file %s.\\n\", tmp_file);\n            *error_code = ERROR_RECV;\n            return FALSE;\n        }\n\n        total_size += (gint64)st.st_size;\n    }\n    \n    if (seaf->max_upload_size > 0 && total_size > seaf->max_upload_size) {\n        seaf_debug (\"[upload] File size is too large.\\n\");\n        *error_code = ERROR_SIZE;\n        return FALSE;\n    }\n\n    return TRUE;\n}\n\nstatic char *\nget_canonical_path (const char *path)\n{\n    char *ret = g_strdup (path);\n    char *p;\n\n    for (p = ret; *p != 0; ++p) {\n        if (*p == '\\\\')\n            *p = '/';\n    }\n\n    /* Remove trailing slashes from dir path. 
*/\n    int len = strlen(ret);\n    int i = len - 1;\n    while (i >= 0 && ret[i] == '/')\n        ret[i--] = 0;\n\n    return ret;\n}\n\nstatic gboolean\ncheck_parent_dir (evhtp_request_t *req, const char *repo_id,\n                  const char *parent_dir)\n{\n    char *canon_path = NULL;\n    SeafRepo *repo = NULL;\n    SeafCommit *commit = NULL;\n    SeafDir *dir = NULL;\n    GError *error = NULL;\n    gboolean ret = TRUE;\n\n    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);\n    if (!repo) {\n        seaf_warning (\"[upload] Failed to get repo %.8s.\\n\", repo_id);\n        send_error_reply (req, EVHTP_RES_SERVERR, \"Failed to get repo.\\n\");\n        return FALSE;\n    }\n\n    commit = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                                             repo->id, repo->version,\n                                             repo->head->commit_id);\n    if (!commit) {\n        seaf_warning (\"[upload] Failed to get head commit for repo %.8s.\\n\", repo_id);\n        send_error_reply (req, EVHTP_RES_SERVERR, \"Failed to get head commit.\\n\");\n        seaf_repo_unref (repo);\n        return FALSE;\n    }\n\n    canon_path = get_canonical_path (parent_dir);\n\n    dir = seaf_fs_manager_get_seafdir_by_path (seaf->fs_mgr,\n                                               repo->store_id, repo->version,\n                                               commit->root_id,\n                                               canon_path, &error);\n    if (dir) {\n        seaf_dir_free (dir);\n    } else {\n        send_error_reply (req, EVHTP_RES_BADREQ, \"Parent dir doesn't exist.\\n\");\n        ret = FALSE;\n    }\n\n    g_clear_error (&error);\n    g_free (canon_path);\n    seaf_commit_unref (commit);\n    seaf_repo_unref (repo);\n\n    return ret;\n}\n\nstatic gboolean\nis_parent_matched (const char *upload_dir,\n                   const char *parent_dir)\n{\n    gboolean ret = TRUE;\n    char *upload_dir_canon = NULL;\n    char 
*parent_dir_canon = NULL;\n\n    upload_dir_canon = get_canonical_path (upload_dir);\n    parent_dir_canon = get_canonical_path (parent_dir);\n\n    if (strcmp (upload_dir_canon,parent_dir_canon) != 0) {\n        ret = FALSE;\n    }\n\n    g_free (upload_dir_canon);\n    g_free (parent_dir_canon);\n\n    return ret;\n}\n\nstatic char *\nfile_list_to_json (GList *files)\n{\n    json_t *array;\n    GList *ptr;\n    char *file;\n    char *json_data;\n    char *ret;\n\n    array = json_array ();\n\n    for (ptr = files; ptr; ptr = ptr->next) {\n        file = ptr->data;\n        json_array_append_new (array, json_string(file));\n    }\n\n    json_data = json_dumps (array, 0);\n    json_decref (array);\n\n    ret = g_strdup (json_data);\n    free (json_data);\n    return ret;\n}\n\nstatic int\ncreate_relative_path (RecvFSM *fsm, char *parent_dir, char *relative_path)\n{\n    int rc = 0;\n    GError *error = NULL;\n\n    if (!relative_path)\n        return 0;\n\n    rc = seaf_repo_manager_mkdir_with_parents (seaf->repo_mgr,\n                                               fsm->repo_id,\n                                               parent_dir,\n                                               relative_path,\n                                               fsm->user,\n                                               &error);\n    if (rc < 0) {\n        if (error) {\n            seaf_warning (\"[upload folder] %s.\", error->message);\n            g_clear_error (&error);\n        }\n    }\n\n    return rc;\n}\n\nstatic char *\nfile_id_list_from_json (const char *ret_json)\n{\n    json_t *array, *obj, *value;\n    json_error_t err;\n    size_t index;\n    GString *id_list;\n\n    array = json_loadb (ret_json, strlen(ret_json), 0, &err);\n    if (!array) {\n        seaf_warning (\"Failed to load ret_json: %s.\\n\", err.text);\n        return NULL;\n    }\n\n    id_list = g_string_new (NULL);\n    size_t n = json_array_size (array);\n    for (index = 0; index < n; index++) {\n      
  obj = json_array_get (array, index);\n        value = json_object_get (obj, \"id\");\n        const char *id = json_string_value (value);\n        g_string_append (id_list, id);\n        if (index != n - 1)\n            g_string_append (id_list, \"\\t\");\n    }\n\n    json_decref (array);\n    return g_string_free (id_list, FALSE);\n}\n\nstatic gint64\nrfc3339_to_timestamp (const char *last_modify)\n{\n    if (!last_modify) {\n        return -1;\n    }\n    GDateTime *date_time = g_date_time_new_from_iso8601(last_modify, NULL);\n    if (!date_time) {\n        return -1;\n    }\n    gint64 mtime = g_date_time_to_unix(date_time);\n\n    g_date_time_unref(date_time);\n    return mtime;\n}\n\nstatic void\nupload_api_cb(evhtp_request_t *req, void *arg)\n{\n    RecvFSM *fsm = arg;\n    char *parent_dir, *replace_str;\n    char *relative_path = NULL, *new_parent_dir = NULL;\n    char *last_modify = NULL;\n    gint64 mtime = 0;\n    GError *error = NULL;\n    int error_code = -1;\n    char *filenames_json, *tmp_files_json;\n    int replace = 0;\n    int rc;\n\n    evhtp_headers_add_header (req->headers_out,\n                              evhtp_header_new(\"Access-Control-Allow-Headers\",\n                                               \"x-requested-with, content-type, content-range, content-disposition, accept, origin, authorization\", 1, 1));\n    evhtp_headers_add_header (req->headers_out,\n                              evhtp_header_new(\"Access-Control-Allow-Methods\",\n                                               \"GET, POST, PUT, PATCH, DELETE, OPTIONS\", 1, 1));\n    evhtp_headers_add_header (req->headers_out,\n                              evhtp_header_new(\"Access-Control-Allow-Origin\",\n                                               \"*\", 1, 1));\n    evhtp_headers_add_header (req->headers_out,\n                              evhtp_header_new(\"Access-Control-Max-Age\",\n                                               \"86400\", 1, 1));\n\n    if 
(evhtp_request_get_method(req) == htp_method_OPTIONS) {\n        /* If CORS preflight header, then create an empty body response (200 OK)\n         * and return it.\n         */\n        send_success_reply (req);\n        return;\n    }\n\n    /* After upload_headers_cb() returns an error, libevhtp may still\n     * receive data from the web browser and call into this cb.\n     * In this case fsm will be NULL.\n     */\n    if (!fsm || fsm->state == RECV_ERROR)\n        return;\n\n    if (!fsm->filenames) {\n        seaf_debug (\"[upload] No file uploaded.\\n\");\n        send_error_reply (req, EVHTP_RES_BADREQ, \"No file uploaded.\\n\");\n        return;\n    }\n\n    last_modify = g_hash_table_lookup (fsm->form_kvs, \"last_modify\");\n    if (last_modify) {\n        mtime = rfc3339_to_timestamp (last_modify);\n    }\n\n    replace_str = g_hash_table_lookup (fsm->form_kvs, \"replace\");\n    if (replace_str) {\n        replace = atoi(replace_str);\n        if (replace != 0 && replace != 1) {\n            seaf_debug (\"[Upload] Invalid argument replace: %s.\\n\", replace_str);\n            send_error_reply (req, EVHTP_RES_BADREQ, \"Invalid argument replace.\\n\");\n            return;\n        }\n    }\n    parent_dir = g_hash_table_lookup (fsm->form_kvs, \"parent_dir\");\n    if (!parent_dir) {\n        seaf_debug (\"[upload] No parent dir given.\\n\");\n        send_error_reply (req, EVHTP_RES_BADREQ, \"Invalid parent dir.\\n\");\n        return;\n    }\n    relative_path = g_hash_table_lookup (fsm->form_kvs, \"relative_path\");\n    if (relative_path != NULL) {\n        if (relative_path[0] == '/' || relative_path[0] == '\\\\') {\n            seaf_warning (\"Invalid relative path %s.\\n\", relative_path);\n            send_error_reply (req, EVHTP_RES_BADREQ, \"Invalid relative path.\");\n            return;\n        }\n        char *tmp_p = get_canonical_path(parent_dir);\n        char *tmp_r = get_canonical_path(relative_path);\n        new_parent_dir = 
g_build_path(\"/\", tmp_p, tmp_r, NULL);\n        g_free(tmp_p);\n        g_free(tmp_r);\n    } else {\n        new_parent_dir = get_canonical_path(parent_dir);\n    }\n\n    if (fsm->rstart >= 0) {\n        if (fsm->filenames->next) {\n            seaf_debug (\"[upload] Breakpoint transfer only support one file in one request.\\n\");\n            send_error_reply (req, EVHTP_RES_BADREQ, \"More files in one request.\\n\");\n            goto out;\n        }\n\n        if (parent_dir[0] != '/') {\n            seaf_debug (\"[upload] Invalid parent dir, should start with /.\\n\");\n            send_error_reply (req, EVHTP_RES_BADREQ, \"Invalid parent dir.\\n\");\n            goto out;\n        }\n\n        if (!fsm->resumable_tmp_file)\n            fsm->resumable_tmp_file = g_build_path (\"/\", new_parent_dir, (char *)fsm->filenames->data, NULL);\n\n        if (write_block_data_to_tmp_file (fsm, new_parent_dir,\n                                          (char *)fsm->filenames->data) < 0) {\n            error_code = ERROR_INTERNAL;\n            goto out;\n        }\n        if (fsm->rend != fsm->fsize - 1) {\n            const char *success_str = \"{\\\"success\\\": true}\";\n            evbuffer_add (req->buffer_out, success_str, strlen(success_str));\n            send_success_reply_ie8_compatible (req, EVHTP_RES_OK);\n            goto out;\n        }\n    }\n\n    if (!fsm->files) {\n        seaf_debug (\"[upload] No file uploaded.\\n\");\n        send_error_reply (req, EVHTP_RES_BADREQ, \"No file uploaded.\\n\");\n        goto out;\n    }\n\n    if (!check_parent_dir (req, fsm->repo_id, parent_dir))\n        goto out;\n\n    if (!fsm->parent_dir || !is_parent_matched (fsm->parent_dir, parent_dir)){\n        error_code = ERROR_FORBIDDEN;\n        goto out;\n    }\n\n    if (!check_tmp_file_list (fsm->files, &error_code))\n        goto out;\n\n    gint64 content_len;\n    if (fsm->fsize > 0)\n        content_len = fsm->fsize;\n    else\n        content_len = 
get_content_length (req);\n    if (seaf_quota_manager_check_quota_with_delta (seaf->quota_mgr,\n                                                   fsm->repo_id,\n                                                   content_len) != 0) {\n        error_code = ERROR_QUOTA;\n        goto out;\n    }\n\n    rc = create_relative_path (fsm, parent_dir, relative_path);\n    if (rc < 0) {\n        error_code = ERROR_INTERNAL;\n        goto out;\n    }\n\n    filenames_json = file_list_to_json (fsm->filenames);\n    tmp_files_json = file_list_to_json (fsm->files);\n\n    char *ret_json = NULL;\n    char *task_id = NULL;\n    rc = seaf_repo_manager_post_multi_files (seaf->repo_mgr,\n                                             fsm->repo_id,\n                                             new_parent_dir,\n                                             filenames_json,\n                                             tmp_files_json,\n                                             fsm->user,\n                                             replace,\n                                             mtime,\n                                             &ret_json,\n                                             fsm->need_idx_progress ? 
&task_id : NULL,\n                                             &error);\n    g_free (filenames_json);\n    g_free (tmp_files_json);\n    if (rc < 0) {\n        error_code = ERROR_INTERNAL;\n        if (error) {\n            if (error->code == POST_FILE_ERR_FILENAME) {\n                error_code = ERROR_FILENAME;\n            } else if (error->code == SEAF_ERR_FILES_WITH_SAME_NAME) {\n                error_code = -1;\n                send_error_reply (req, EVHTP_RES_BADREQ, \"Too many files with same name.\\n\");\n            } else if (error->code == SEAF_ERR_GC_CONFLICT) {\n                error_code = -1;\n                send_error_reply (req, EVHTP_RES_CONFLICT, \"GC Conflict.\\n\");\n            }\n            g_clear_error (&error);\n        }\n        goto out;\n    }\n\n    if (task_id) {\n        evbuffer_add (req->buffer_out, task_id, strlen(task_id));\n        g_free (task_id);\n    } else {\n        const char *use_json = evhtp_kv_find (req->uri->query, \"ret-json\");\n        if (use_json) {\n            evbuffer_add (req->buffer_out, ret_json, strlen(ret_json));\n        } else {\n            char *new_ids = file_id_list_from_json (ret_json);\n            if (new_ids)\n                evbuffer_add (req->buffer_out, new_ids, strlen(new_ids));\n            g_free (new_ids);\n        }\n    }\n    g_free (ret_json);\n\n    send_success_reply (req);\n\n    char *oper = \"web-file-upload\";\n    if (g_strcmp0(fsm->token_type, \"upload-link\") == 0)\n        oper = \"link-file-upload\";\n    send_statistic_msg(fsm->repo_id, fsm->user, oper, (guint64)content_len);\n\nout:\n    g_free(new_parent_dir);\n    send_reply_by_error_code (req, error_code);\n\n    return;\n}\n\n\nstatic void\nupload_raw_blks_api_cb(evhtp_request_t *req, void *arg)\n{\n    RecvFSM *fsm = arg;\n    GError *error = NULL;\n    int error_code = -1;\n    char *blockids_json, *tmp_files_json;\n\n    /* After upload_headers_cb() returns an error, libevhtp may still\n     * receive data from 
the web browser and call into this cb.\n     * In this case fsm will be NULL.\n     */\n    if (!fsm || fsm->state == RECV_ERROR)\n        return;\n\n    if (!check_tmp_file_list (fsm->files, &error_code))\n        goto out;\n\n    blockids_json = file_list_to_json (fsm->filenames);\n    tmp_files_json = file_list_to_json (fsm->files);\n\n    int rc = seaf_repo_manager_post_blocks (seaf->repo_mgr,\n                                            fsm->repo_id,\n                                            blockids_json,\n                                            tmp_files_json,\n                                            fsm->user,\n                                            &error);\n    g_free (blockids_json);\n    g_free (tmp_files_json);\n    if (rc < 0) {\n        error_code = ERROR_INTERNAL;\n        if (error) {\n            if (error->code == POST_FILE_ERR_FILENAME) {\n                error_code = ERROR_FILENAME;\n            }\n            g_clear_error (&error);\n        }\n        goto out;\n    }\n    guint64 content_len = (guint64)get_content_length(req);\n    send_statistic_msg(fsm->repo_id, fsm->user, \"web-file-upload\", content_len);\n\n    evbuffer_add (req->buffer_out, \"\\\"OK\\\"\", 4);\n    send_success_reply (req);\n\nout:\n    send_reply_by_error_code (req, error_code);\n\n    return;\n}\n\nstatic void\nupload_blks_api_cb(evhtp_request_t *req, void *arg)\n{\n    RecvFSM *fsm = arg;\n    const char *parent_dir, *file_name, *size_str, *replace_str, *commitonly_str;\n    char *last_modify = NULL;\n    gint64 mtime = 0;\n    GError *error = NULL;\n    int error_code = -1;\n    char *blockids_json;\n    gint64 file_size = -1;\n    int replace = 0;\n\n    /* After upload_headers_cb() returns an error, libevhtp may still\n     * receive data from the web browser and call into this cb.\n     * In this case fsm will be NULL.\n     */\n    if (!fsm || fsm->state == RECV_ERROR)\n        return;\n\n    replace_str = g_hash_table_lookup (fsm->form_kvs, 
\"replace\");\n    if (replace_str) {\n        replace = atoi(replace_str);\n        if (replace != 0 && replace != 1) {\n            seaf_debug (\"[Upload-blks] Invalid argument replace: %s.\\n\", replace_str);\n            send_error_reply (req, EVHTP_RES_BADREQ, \"Invalid argument replace.\\n\");\n            return;\n        }\n    }\n    parent_dir = g_hash_table_lookup (fsm->form_kvs, \"parent_dir\");\n    file_name = g_hash_table_lookup (fsm->form_kvs, \"file_name\");\n    size_str = g_hash_table_lookup (fsm->form_kvs, \"file_size\");\n    if (size_str)\n        file_size = atoll(size_str);\n    commitonly_str = evhtp_kv_find (req->uri->query, \"commitonly\");\n\n    last_modify = g_hash_table_lookup (fsm->form_kvs, \"last_modify\");\n    if (last_modify) {\n        mtime = rfc3339_to_timestamp (last_modify);\n    }\n\n    if (!file_name || !parent_dir || !size_str || file_size < 0) {\n        seaf_debug (\"[upload-blks] No parent dir or file name given.\\n\");\n        send_error_reply (req, EVHTP_RES_BADREQ, \"No parent dir or file name.\\n\");\n        return;\n    }\n    if (!commitonly_str) {\n        send_error_reply (req, EVHTP_RES_BADREQ, \"Only commit suppported.\\n\");\n        return;\n    }\n\n    if (!check_parent_dir (req, fsm->repo_id, parent_dir))\n        return;\n\n    char *new_file_id = NULL;\n    int rc = 0;\n    /* if (!commitonly_str) { */\n    /*     gint64 content_len = get_content_length (req); */\n    /*     if (seaf_quota_manager_check_quota_with_delta (seaf->quota_mgr, */\n    /*                                                    fsm->repo_id, */\n    /*                                                    content_len) != 0) { */\n    /*         error_code = ERROR_QUOTA; */\n    /*         goto error; */\n    /*     } */\n\n    /*     if (!check_tmp_file_list (fsm->files, &error_code)) */\n    /*         goto error; */\n    /*     blockids_json = file_list_to_json (fsm->filenames); */\n    /*     tmp_files_json = file_list_to_json 
(fsm->files); */\n\n    /*     rc = seaf_repo_manager_post_file_blocks (seaf->repo_mgr, */\n    /*                                              fsm->repo_id, */\n    /*                                              parent_dir, */\n    /*                                              file_name, */\n    /*                                              blockids_json, */\n    /*                                              tmp_files_json, */\n    /*                                              fsm->user, */\n    /*                                              file_size, */\n    /*                                              replace, */\n    /*                                              &new_file_id, */\n    /*                                              &error); */\n    /*     g_free (blockids_json); */\n    /*     g_free (tmp_files_json); */\n    /* } else { */\n\n    blockids_json = g_hash_table_lookup (fsm->form_kvs, \"blockids\");\n    if (blockids_json == NULL) {\n        seaf_debug (\"[upload-blks] No blockids given.\\n\");\n        send_error_reply (req, EVHTP_RES_BADREQ, \"No blockids.\\n\");\n        return;\n    }\n    rc = seaf_repo_manager_commit_file_blocks (seaf->repo_mgr,\n                                               fsm->repo_id,\n                                               parent_dir,\n                                               file_name,\n                                               blockids_json,\n                                               fsm->user,\n                                               file_size,\n                                               replace,\n                                               mtime,\n                                               &new_file_id,\n                                               &error);\n    if (rc < 0) {\n        error_code = ERROR_INTERNAL;\n        if (error) {\n            if (error->code == POST_FILE_ERR_FILENAME) {\n                error_code = ERROR_FILENAME;\n            } else 
if (error->code == POST_FILE_ERR_BLOCK_MISSING) {\n                error_code = ERROR_BLOCK_MISSING;\n            } else if (error->code == POST_FILE_ERR_QUOTA_FULL) {\n                error_code = ERROR_QUOTA;\n            } else if (error->code == SEAF_ERR_GC_CONFLICT) {\n                error_code = -1;\n                send_error_reply (req, EVHTP_RES_CONFLICT, \"GC Conflict.\\n\");\n            }\n            g_clear_error (&error);\n        }\n        goto out;\n    }\n\n    const char *use_json = evhtp_kv_find (req->uri->query, \"ret-json\");\n    if (use_json) {\n        json_t *json = json_object ();\n        json_object_set_string_member(json, \"id\", new_file_id);\n        char *json_data = json_dumps (json, 0);\n        evbuffer_add (req->buffer_out, json_data, strlen(json_data));\n        json_decref (json);\n        free (json_data);\n    } else {\n        evbuffer_add (req->buffer_out, \"\\\"\", 1);\n        evbuffer_add (req->buffer_out, new_file_id, strlen(new_file_id));\n        evbuffer_add (req->buffer_out, \"\\\"\", 1);\n    }\n    send_success_reply (req);\n\nout:\n    g_free (new_file_id);\n    send_reply_by_error_code (req, error_code);\n\n    return;\n}\n\n/* static void */\n/* upload_blks_ajax_cb(evhtp_request_t *req, void *arg) */\n/* { */\n/*     RecvFSM *fsm = arg; */\n/*     char *parent_dir, *file_name, *size_str; */\n/*     GError *error = NULL; */\n/*     int error_code = ERROR_INTERNAL; */\n/*     char *blockids_json, *tmp_files_json; */\n/*     gint64 file_size = -1; */\n\n/*     evhtp_headers_add_header (req->headers_out, */\n/*                               evhtp_header_new(\"Access-Control-Allow-Headers\", */\n/*                                                \"x-requested-with, content-type, accept, origin, authorization\", 1, 1)); */\n/*     evhtp_headers_add_header (req->headers_out, */\n/*                               evhtp_header_new(\"Access-Control-Allow-Methods\", */\n/*                                                
\"GET, POST, PUT, PATCH, DELETE, OPTIONS\", 1, 1)); */\n/*     evhtp_headers_add_header (req->headers_out, */\n/*                               evhtp_header_new(\"Access-Control-Allow-Origin\", */\n/*                                                \"*\", 1, 1)); */\n/*     evhtp_headers_add_header (req->headers_out, */\n/*                               evhtp_header_new(\"Access-Control-Max-Age\", */\n/*                                                \"86400\", 1, 1)); */\n\n/*     if (evhtp_request_get_method(req) == htp_method_OPTIONS) { */\n/*         /\\* If CORS preflight header, then create an empty body response (200 OK) */\n/*          * and return it. */\n/*          *\\/ */\n/*         send_success_reply (req); */\n/*         return; */\n/*     } */\n\n/*     /\\* After upload_headers_cb() returns an error, libevhtp may still */\n/*      * receive data from the web browser and call into this cb. */\n/*      * In this case fsm will be NULL. */\n/*      *\\/ */\n/*     if (!fsm || fsm->state == RECV_ERROR) */\n/*         return; */\n\n/*     parent_dir = g_hash_table_lookup (fsm->form_kvs, \"parent_dir\"); */\n/*     file_name = g_hash_table_lookup (fsm->form_kvs, \"file_name\"); */\n/*     size_str = g_hash_table_lookup (fsm->form_kvs, \"file_size\"); */\n/*     if (size_str) */\n/*         file_size = atoll(size_str); */\n/*     if (!file_name || !parent_dir || !size_str || file_size < 0) { */\n/*         seaf_debug (\"[upload-blks] No parent dir or file name given.\\n\"); */\n/*         send_error_reply (req, EVHTP_RES_BADREQ, \"Invalid URL.\\n\"); */\n/*         return; */\n/*     } */\n\n/*     if (!check_parent_dir (req, fsm->repo_id, parent_dir)) */\n/*         return; */\n\n/*     if (!check_tmp_file_list (fsm->files, &error_code)) */\n/*         goto error; */\n\n/*     gint64 content_len = get_content_length (req); */\n/*     if (seaf_quota_manager_check_quota_with_delta (seaf->quota_mgr, */\n/*                                                    
fsm->repo_id, */\n/*                                                    content_len) != 0) { */\n/*         error_code = ERROR_QUOTA; */\n/*         goto error; */\n/*     } */\n\n/*     blockids_json = file_list_to_json (fsm->filenames); */\n/*     tmp_files_json = file_list_to_json (fsm->files); */\n\n/*     int rc = seaf_repo_manager_post_file_blocks (seaf->repo_mgr, */\n/*                                                  fsm->repo_id, */\n/*                                                  parent_dir, */\n/*                                                  file_name, */\n/*                                                  blockids_json, */\n/*                                                  tmp_files_json, */\n/*                                                  fsm->user, */\n/*                                                  file_size, */\n/*                                                  0, */\n/*                                                  NULL, */\n/*                                                  &error); */\n/*     g_free (blockids_json); */\n/*     g_free (tmp_files_json); */\n/*     if (rc < 0) { */\n/*         if (error) { */\n/*             if (error->code == POST_FILE_ERR_FILENAME) { */\n/*                 error_code = ERROR_FILENAME; */\n/*             } */\n/*             g_clear_error (&error); */\n/*         } */\n/*         goto error; */\n/*     } */\n\n/*     send_success_reply (req); */\n/*     return; */\n\n/* error: */\n/*     switch (error_code) { */\n/*     case ERROR_FILENAME: */\n/*         send_error_reply (req, SEAF_HTTP_RES_BADFILENAME, \"Invalid filename.\"); */\n/*         break; */\n/*     case ERROR_EXISTS: */\n/*         send_error_reply (req, SEAF_HTTP_RES_EXISTS, \"File already exists.\"); */\n/*         break; */\n/*     case ERROR_SIZE: */\n/*         send_error_reply (req, SEAF_HTTP_RES_TOOLARGE, \"File size is too large.\"); */\n/*         break; */\n/*     case ERROR_QUOTA: */\n/*         send_error_reply (req, 
SEAF_HTTP_RES_NOQUOTA, \"Out of quota.\"); */\n/*         break; */\n/*     case ERROR_RECV: */\n/*     case ERROR_INTERNAL: */\n/*         send_error_reply (req, EVHTP_RES_SERVERR, \"Internal error.\\n\"); */\n/*         break; */\n/*     } */\n/* } */\n\nstatic int\ncopy_block_to_tmp_file (int blk_fd, int tmp_fd, gint64 offset)\n{\n    if (lseek(blk_fd, 0, SEEK_SET) < 0) {\n        seaf_warning (\"Failed to rewind block temp file position to start: %s\\n\",\n                      strerror(errno));\n        return -1;\n    }\n\n    if (lseek(tmp_fd, offset, SEEK_SET) <0) {\n        seaf_warning (\"Failed to rewind web upload temp file write position: %s\\n\",\n                      strerror(errno));\n        return -1;\n    }\n\n    char buf[8192];\n    int buf_len = sizeof(buf);\n    ssize_t len;\n\n    while (TRUE) {\n        len = readn (blk_fd, buf, buf_len);\n        if (len < 0) {\n            seaf_warning (\"Failed to read content from block temp file: %s.\\n\",\n                          strerror(errno));\n            return -1;\n        } else if (len == 0) {\n            return 0;\n        }\n\n        if (writen (tmp_fd, buf, len) != len) {\n            seaf_warning (\"Failed to write content to temp file: %s.\\n\",\n                          strerror(errno));\n            return -1;\n        }\n    }\n}\n\nstatic int\nwrite_block_data_to_tmp_file (RecvFSM *fsm, const char *parent_dir,\n                              const char *file_name)\n{\n    char *abs_path;\n    char *temp_file = NULL;\n    GError *error = NULL;\n    int tmp_fd = -1;\n    int ret = 0;\n    HttpServerStruct *htp_server = seaf->http_server;\n    int cluster_shared_temp_file_mode = htp_server->cluster_shared_temp_file_mode;\n\n    abs_path = g_build_path (\"/\", parent_dir, file_name, NULL);\n\n    temp_file = seaf_repo_manager_get_upload_tmp_file (seaf->repo_mgr,\n                                                       fsm->repo_id,\n                                                    
   abs_path, &error);\n    if (error) {\n        seaf_warning (\"%s\\n\", error->message);\n        g_clear_error (&error);\n        ret = -1;\n        goto out;\n    }\n\n    if (!temp_file) {\n        temp_file = g_strdup_printf (\"%s/cluster-shared/%sXXXXXX\",\n                                     seaf->http_server->http_temp_dir,\n                                     file_name);\n        tmp_fd = g_mkstemp_full (temp_file, O_RDWR, cluster_shared_temp_file_mode);\n        if (tmp_fd < 0) {\n            seaf_warning (\"Failed to create upload temp file: %s.\\n\", strerror(errno));\n            ret = -1;\n            goto out;\n        }\n\n        if (seaf_repo_manager_add_upload_tmp_file (seaf->repo_mgr,\n                                                   fsm->repo_id,\n                                                   abs_path, temp_file,\n                                                   &error) < 0) {\n            seaf_warning (\"%s\\n\", error->message);\n            g_clear_error (&error);\n            close (tmp_fd);\n            g_unlink (temp_file);\n            tmp_fd = -1;\n            ret = -1;\n            goto out;\n        }\n    } else {\n        tmp_fd = g_open (temp_file, O_WRONLY);\n        if (tmp_fd < 0) {\n            seaf_warning (\"Failed to open upload temp file: %s.\\n\", strerror(errno));\n            if (errno == ENOENT) {\n                seaf_message (\"Upload temp file %s doesn't exist, remove record from db.\\n\",\n                              temp_file);\n                seaf_repo_manager_del_upload_tmp_file (seaf->repo_mgr, fsm->repo_id,\n                                                       abs_path, &error);\n            }\n            ret = -1;\n            goto out;\n        }\n    }\n\n    if (copy_block_to_tmp_file (fsm->fd, tmp_fd, fsm->rstart) < 0) {\n        ret = -1;\n        goto out;\n    }\n\n    if (fsm->rend == fsm->fsize - 1) {\n        // For the last block, record tmp_files for upload to seafile and remove\n 
       fsm->files = g_list_prepend (fsm->files, g_strdup(temp_file)); // for virus checking, indexing...\n    }\n\nout:\n    g_free (abs_path);\n    if (tmp_fd >= 0) {\n        close (tmp_fd);\n    }\n    g_free (temp_file);\n    close (fsm->fd);\n    g_unlink (fsm->tmp_file);\n    g_free (fsm->tmp_file);\n    fsm->tmp_file = NULL;\n\n    return ret;\n}\n/*\n  Handle AJAX file upload.\n  @return an array of json data, e.g. [{\"name\": \"foo.txt\"}]\n */\nstatic void\nupload_ajax_cb(evhtp_request_t *req, void *arg)\n{\n    RecvFSM *fsm = arg;\n    char *parent_dir = NULL, *relative_path = NULL, *new_parent_dir = NULL;\n    char *last_modify = NULL;\n    gint64 mtime = 0;\n    GError *error = NULL;\n    int error_code = -1;\n    char *filenames_json, *tmp_files_json;\n    int rc;\n\n    evhtp_headers_add_header (req->headers_out,\n                              evhtp_header_new(\"Access-Control-Allow-Headers\",\n                                               \"x-requested-with, content-type, content-range, content-disposition, accept, origin, authorization\", 1, 1));\n    evhtp_headers_add_header (req->headers_out,\n                              evhtp_header_new(\"Access-Control-Allow-Methods\",\n                                               \"GET, POST, PUT, PATCH, DELETE, OPTIONS\", 1, 1));\n    evhtp_headers_add_header (req->headers_out,\n                              evhtp_header_new(\"Access-Control-Allow-Origin\",\n                                               \"*\", 1, 1));\n    evhtp_headers_add_header (req->headers_out,\n                              evhtp_header_new(\"Access-Control-Max-Age\",\n                                               \"86400\", 1, 1));\n\n    if (evhtp_request_get_method(req) == htp_method_OPTIONS) {\n        /* If CORS preflight header, then create an empty body response (200 OK)\n         * and return it.\n         */\n        send_success_reply (req);\n        return;\n    }\n\n    /* After upload_headers_cb() returns an error, 
libevhtp may still\n     * receive data from the web browser and call into this cb.\n     * In this case fsm will be NULL.\n     */\n    if (!fsm || fsm->state == RECV_ERROR)\n        return;\n\n    parent_dir = g_hash_table_lookup (fsm->form_kvs, \"parent_dir\");\n    if (!parent_dir) {\n        seaf_debug (\"[upload] No parent dir given.\\n\");\n        send_error_reply (req, EVHTP_RES_BADREQ, \"Invalid parent dir.\");\n        return;\n    }\n\n    last_modify = g_hash_table_lookup (fsm->form_kvs, \"last_modify\");\n    if (last_modify) {\n        mtime = rfc3339_to_timestamp (last_modify);\n    }\n\n    if (!fsm->filenames) {\n        seaf_debug (\"[upload] No file uploaded.\\n\");\n        send_error_reply (req, EVHTP_RES_BADREQ, \"No file uploaded.\\n\");\n        return;\n    }\n\n    relative_path = g_hash_table_lookup (fsm->form_kvs, \"relative_path\");\n    if (relative_path != NULL) {\n        if (relative_path[0] == '/' || relative_path[0] == '\\\\') {\n            seaf_warning (\"Invalid relative path %s.\\n\", relative_path);\n            send_error_reply (req, EVHTP_RES_BADREQ, \"Invalid relative path.\");\n            return;\n        }\n        char *tmp_p = get_canonical_path(parent_dir);\n        char *tmp_r = get_canonical_path(relative_path);\n        new_parent_dir = g_build_path(\"/\", tmp_p, tmp_r, NULL);\n        g_free(tmp_p);\n        g_free(tmp_r);\n    } else {\n        new_parent_dir = get_canonical_path(parent_dir);\n    }\n\n    if (fsm->rstart >= 0) {\n        if (fsm->filenames->next) {\n            seaf_debug (\"[upload] Breakpoint transfer only support one file in one request.\\n\");\n            send_error_reply (req, EVHTP_RES_BADREQ, \"More files in one request.\\n\");\n            goto out;\n        }\n\n        if (parent_dir[0] != '/') {\n            seaf_debug (\"[upload] Invalid parent dir, should start with /.\\n\");\n            send_error_reply (req, EVHTP_RES_BADREQ, \"Invalid parent dir.\\n\");\n            goto 
out;\n        }\n\n        if (!fsm->resumable_tmp_file)\n            fsm->resumable_tmp_file = g_build_path (\"/\", new_parent_dir, (char *)fsm->filenames->data, NULL);\n\n        if (write_block_data_to_tmp_file (fsm, new_parent_dir,\n                                          (char *)fsm->filenames->data) < 0) {\n            error_code = ERROR_INTERNAL;\n            goto out;\n        }\n        if (fsm->rend != fsm->fsize - 1) {\n            const char *success_str = \"{\\\"success\\\": true}\";\n            evbuffer_add (req->buffer_out, success_str, strlen(success_str));\n            send_success_reply_ie8_compatible (req, EVHTP_RES_OK);\n            goto out;\n        }\n    }\n\n    if (!fsm->files) {\n        seaf_debug (\"[upload] No file uploaded.\\n\");\n        send_error_reply (req, EVHTP_RES_BADREQ, \"No file uploaded.\\n\");\n        goto out;\n    }\n\n    if (!check_parent_dir (req, fsm->repo_id, parent_dir))\n        goto out;\n\n    if (!fsm->parent_dir || !is_parent_matched (fsm->parent_dir, parent_dir)){\n        error_code = ERROR_FORBIDDEN;\n        goto out;\n    }\n\n    if (!check_tmp_file_list (fsm->files, &error_code))\n        goto out;\n\n    gint64 content_len;\n    if (fsm->fsize > 0)\n        content_len = fsm->fsize;\n    else\n        content_len = get_content_length (req);\n\n    if (seaf_quota_manager_check_quota_with_delta (seaf->quota_mgr,\n                                                   fsm->repo_id,\n                                                   content_len) != 0) {\n        error_code = ERROR_QUOTA;\n        goto out;\n    }\n\n    rc = create_relative_path (fsm, parent_dir, relative_path);\n    if (rc < 0) {\n        error_code = ERROR_INTERNAL;\n        goto out;\n    }\n\n    filenames_json = file_list_to_json (fsm->filenames);\n    tmp_files_json = file_list_to_json (fsm->files);\n\n    char *ret_json = NULL;\n    char *task_id = NULL;\n    rc = seaf_repo_manager_post_multi_files (seaf->repo_mgr,\n               
                              fsm->repo_id,\n                                             new_parent_dir,\n                                             filenames_json,\n                                             tmp_files_json,\n                                             fsm->user,\n                                             0,\n                                             mtime,\n                                             &ret_json,\n                                             fsm->need_idx_progress ? &task_id : NULL,\n                                             &error);\n    g_free (filenames_json);\n    g_free (tmp_files_json);\n    if (rc < 0) {\n        error_code = ERROR_INTERNAL;\n        if (error) {\n            if (error->code == POST_FILE_ERR_FILENAME) {\n                error_code = ERROR_FILENAME;\n            } else if (error->code == SEAF_ERR_FILES_WITH_SAME_NAME) {\n                error_code = -1;\n                send_error_reply (req, EVHTP_RES_BADREQ, \"Too many files with same name.\\n\");\n            } else if (error->code == SEAF_ERR_GC_CONFLICT) {\n                error_code = -1;\n                send_error_reply (req, EVHTP_RES_CONFLICT, \"GC Conflict.\\n\");\n            }\n            g_clear_error (&error);\n        }\n        goto out;\n    }\n\n    if (task_id) {\n        evbuffer_add (req->buffer_out, task_id, strlen(task_id));\n        g_free (task_id);\n    } else {\n        evbuffer_add (req->buffer_out, ret_json, strlen(ret_json));\n    }\n    g_free (ret_json);\n\n    send_success_reply_ie8_compatible (req, EVHTP_RES_OK);\n\n    char *oper = \"web-file-upload\";\n    if (g_strcmp0(fsm->token_type, \"upload-link\") == 0)\n        oper = \"link-file-upload\";\n    send_statistic_msg(fsm->repo_id, fsm->user, oper, (guint64)content_len);\n\nout:\n    g_free (new_parent_dir);\n    send_reply_by_error_code (req, error_code);\n\n    return;\n}\n\nstatic void\nupdate_api_cb(evhtp_request_t *req, void *arg)\n{\n    RecvFSM 
*fsm = arg;\n    char *target_file, *parent_dir = NULL, *filename = NULL;\n    char *last_modify = NULL;\n    gint64 mtime = 0;\n    const char *head_id = NULL;\n    GError *error = NULL;\n    int error_code = -1;\n    char *new_file_id = NULL;\n\n    evhtp_headers_add_header (req->headers_out,\n                              evhtp_header_new(\"Access-Control-Allow-Headers\",\n                                               \"x-requested-with, content-type, content-range, content-disposition, accept, origin, authorization\", 1, 1));\n    evhtp_headers_add_header (req->headers_out,\n                              evhtp_header_new(\"Access-Control-Allow-Methods\",\n                                               \"GET, POST, PUT, PATCH, DELETE, OPTIONS\", 1, 1));\n    evhtp_headers_add_header (req->headers_out,\n                              evhtp_header_new(\"Access-Control-Allow-Origin\",\n                                               \"*\", 1, 1));\n    evhtp_headers_add_header (req->headers_out,\n                              evhtp_header_new(\"Access-Control-Max-Age\",\n                                               \"86400\", 1, 1));\n\n    if (evhtp_request_get_method(req) == htp_method_OPTIONS) {\n        /* If CORS preflight header, then create an empty body response (200 OK)\n         * and return it.\n         */\n        send_success_reply (req);\n        return;\n    }\n\n    if (!fsm || fsm->state == RECV_ERROR)\n        return;\n\n    if (!fsm->filenames) {\n        seaf_debug (\"[Update] No file uploaded.\\n\");\n        send_error_reply (req, EVHTP_RES_BADREQ, \"No file uploaded.\\n\");\n        return;\n    }\n\n    target_file = g_hash_table_lookup (fsm->form_kvs, \"target_file\");\n    if (!target_file) {\n        seaf_debug (\"[Update] No target file given.\\n\");\n        send_error_reply (req, EVHTP_RES_BADREQ, \"No target file.\\n\");\n        return;\n    }\n\n    last_modify = g_hash_table_lookup (fsm->form_kvs, \"last_modify\");\n    if 
(last_modify) {\n        mtime = rfc3339_to_timestamp (last_modify);\n    }\n\n    parent_dir = g_path_get_dirname (target_file);\n    filename = g_path_get_basename (target_file);\n    if (!filename || filename[0] == '\\0') {\n        seaf_debug (\"[Update] Bad target_file.\\n\");\n        send_error_reply (req, EVHTP_RES_BADREQ, \"Invalid targe_file.\\n\");\n        goto out;\n    }\n\n    if (fsm->rstart >= 0) {\n        if (fsm->filenames->next) {\n            seaf_debug (\"[Update] Breakpoint transfer only support one file in one request.\\n\");\n            send_error_reply (req, EVHTP_RES_BADREQ, \"More than one file in one request.\\n\");\n            goto out;\n        }\n\n        if (parent_dir[0] != '/') {\n            seaf_debug (\"[Update] Invalid parent dir, should start with /.\\n\");\n            send_error_reply (req, EVHTP_RES_BADREQ, \"Invalid parent dir.\\n\");\n            goto out;\n        }\n\n        if (!fsm->resumable_tmp_file)\n            fsm->resumable_tmp_file = g_build_path (\"/\", parent_dir, filename, NULL);\n\n        if (write_block_data_to_tmp_file (fsm, parent_dir, filename) < 0) {\n            send_error_reply (req, EVHTP_RES_SERVERR, \"Internal error.\\n\");\n            goto out;\n        }\n\n        if (fsm->rend != fsm->fsize - 1) {\n            const char *success_str = \"{\\\"success\\\": true}\";\n            evbuffer_add (req->buffer_out, success_str, strlen(success_str));\n            send_success_reply_ie8_compatible (req, EVHTP_RES_OK);\n            goto out;\n        }\n    }\n\n    if (!fsm->files) {\n        seaf_debug (\"[Update] No file uploaded.\\n\");\n        send_error_reply (req, EVHTP_RES_BADREQ, \"No file uploaded.\\n\");\n        goto out;\n    }\n\n    if (!check_parent_dir (req, fsm->repo_id, parent_dir))\n        goto out;\n\n    if (!check_tmp_file_list (fsm->files, &error_code))\n        goto out;\n\n    head_id = evhtp_kv_find (req->uri->query, \"head\");\n\n    gint64 content_len;\n    if 
(fsm->fsize > 0)\n        content_len = fsm->fsize;\n    else\n        content_len = get_content_length (req);\n    if (seaf_quota_manager_check_quota_with_delta (seaf->quota_mgr,\n                                                   fsm->repo_id,\n                                                   content_len) != 0) {\n        error_code = ERROR_QUOTA;\n        goto out;\n    }\n\n    int rc = seaf_repo_manager_put_file (seaf->repo_mgr,\n                                         fsm->repo_id,\n                                         (char *)(fsm->files->data),\n                                         parent_dir,\n                                         filename,\n                                         fsm->user,\n                                         head_id,\n                                         mtime,\n                                         &new_file_id,\n                                         &error);\n    if (rc < 0) {\n        error_code = ERROR_INTERNAL;\n        if (error) {\n            if (g_strcmp0 (error->message, \"file does not exist\") == 0) {\n                error_code = ERROR_NOT_EXIST;\n            }\n            g_clear_error (&error);\n        }\n        goto out;\n    }\n\n    /* Send back the new file id, so that the mobile client can update local cache */\n    evbuffer_add(req->buffer_out, new_file_id, strlen(new_file_id));\n    send_success_reply (req);\n\nout:\n    if (fsm->rstart >= 0 && fsm->rend == fsm->fsize - 1) {\n        // File upload success, try to remove tmp file from WebUploadTmpFile table\n        char *abs_path;\n\n        abs_path = g_build_path (\"/\", parent_dir, filename, NULL);\n\n        seaf_repo_manager_del_upload_tmp_file (seaf->repo_mgr, fsm->repo_id, abs_path, NULL);\n        g_free (abs_path);\n    }\n    g_free (parent_dir);\n    g_free (filename);\n    g_free (new_file_id);\n    send_reply_by_error_code (req, error_code);\n\n    return;\n}\n\nstatic void\nupdate_blks_api_cb(evhtp_request_t *req, 
void *arg)\n{\n    RecvFSM *fsm = arg;\n    char *target_file, *parent_dir = NULL, *filename = NULL, *size_str = NULL;\n    char *last_modify = NULL;\n    gint64 mtime = 0;\n    const char *commitonly_str;\n    GError *error = NULL;\n    int error_code = -1;\n    char *new_file_id = NULL;\n    char *blockids_json;\n    gint64 file_size = -1;\n\n    if (!fsm || fsm->state == RECV_ERROR)\n        return;\n    target_file = g_hash_table_lookup (fsm->form_kvs, \"target_file\");\n    size_str = g_hash_table_lookup (fsm->form_kvs, \"file_size\");\n    if (size_str)  file_size = atoll(size_str);\n    if (!target_file || !size_str || file_size < 0) {\n        seaf_debug (\"[Update-blks] No target file given.\\n\");\n        send_error_reply (req, EVHTP_RES_BADREQ, \"No target file.\\n\");\n        return;\n    }\n    commitonly_str = evhtp_kv_find (req->uri->query, \"commitonly\");\n    if (!commitonly_str) {\n        send_error_reply (req, EVHTP_RES_BADREQ, \"Only commit supported.\\n\");\n        return;\n    }\n\n    last_modify = g_hash_table_lookup (fsm->form_kvs, \"last_modify\");\n    if (last_modify) {\n        mtime = rfc3339_to_timestamp (last_modify);\n    }\n\n    parent_dir = g_path_get_dirname (target_file);\n    filename = g_path_get_basename (target_file);\n\n    if (!check_parent_dir (req, fsm->repo_id, parent_dir))\n        goto out;\n\n    int rc = 0;\n    /* if (!commitonly_str) { */\n    /*     gint64 content_len = get_content_length(req); */\n    /*     if (seaf_quota_manager_check_quota_with_delta (seaf->quota_mgr, */\n    /*                                                    fsm->repo_id, */\n    /*                                                    content_len) != 0) { */\n    /*         error_code = ERROR_QUOTA; */\n    /*         goto error; */\n    /*     } */\n\n    /*     if (!check_tmp_file_list (fsm->files, &error_code)) */\n    /*         goto error; */\n\n    /*     blockids_json = file_list_to_json (fsm->filenames); */\n    /*     
tmp_files_json = file_list_to_json (fsm->files); */\n    /*     rc = seaf_repo_manager_put_file_blocks (seaf->repo_mgr, */\n    /*                                             fsm->repo_id, */\n    /*                                             parent_dir, */\n    /*                                             filename, */\n    /*                                             blockids_json, */\n    /*                                             tmp_files_json, */\n    /*                                             fsm->user, */\n    /*                                             head_id, */\n    /*                                             file_size, */\n    /*                                             &new_file_id, */\n    /*                                             &error); */\n    /*     g_free (blockids_json); */\n    /*     g_free (tmp_files_json); */\n    /* } else { */\n\n    blockids_json = g_hash_table_lookup (fsm->form_kvs, \"blockids\");\n    if (blockids_json == NULL) {\n        seaf_debug (\"[upload-blks] No blockids given.\\n\");\n        send_error_reply (req, EVHTP_RES_BADREQ, \"No blockids.\\n\");\n        goto out;\n    }\n    rc = seaf_repo_manager_commit_file_blocks (seaf->repo_mgr,\n                                               fsm->repo_id,\n                                               parent_dir,\n                                               filename,\n                                               blockids_json,\n                                               fsm->user,\n                                               file_size,\n                                               1,\n                                               mtime,\n                                               &new_file_id,\n                                               &error);\n\n    if (rc < 0) {\n        error_code = ERROR_INTERNAL;\n        if (error) {\n            if (g_strcmp0 (error->message, \"file does not exist\") == 0) {\n                error_code = 
ERROR_NOT_EXIST;\n            } else if (error->code == POST_FILE_ERR_QUOTA_FULL) {\n                error_code = ERROR_QUOTA;\n            } else if (error->code == SEAF_ERR_GC_CONFLICT) {\n                error_code = -1;\n                send_error_reply (req, EVHTP_RES_CONFLICT, \"GC Conflict.\\n\");\n            }\n            g_clear_error (&error);\n        }\n        goto out;\n    }\n\n    /* Send back the new file id, so that the mobile client can update local cache */\n    evbuffer_add(req->buffer_out, new_file_id, strlen(new_file_id));\n    send_success_reply (req);\n\nout:\n    g_free (parent_dir);\n    g_free (filename);\n    g_free (new_file_id);\n    send_reply_by_error_code (req, error_code);\n\n    return;\n}\n\n/* static void */\n/* update_blks_ajax_cb(evhtp_request_t *req, void *arg) */\n/* { */\n/*     RecvFSM *fsm = arg; */\n/*     char *target_file, *parent_dir = NULL, *filename = NULL, *size_str = NULL; */\n/*     const char *head_id = NULL; */\n/*     GError *error = NULL; */\n/*     int error_code = ERROR_INTERNAL; */\n/*     char *blockids_json, *tmp_files_json; */\n/*     gint64 file_size = -1; */\n\n/*     evhtp_headers_add_header (req->headers_out, */\n/*                               evhtp_header_new(\"Access-Control-Allow-Headers\", */\n/*                                                \"x-requested-with, content-type, accept, origin, authorization\", 1, 1)); */\n/*     evhtp_headers_add_header (req->headers_out, */\n/*                               evhtp_header_new(\"Access-Control-Allow-Methods\", */\n/*                                                \"GET, POST, PUT, PATCH, DELETE, OPTIONS\", 1, 1)); */\n/*     evhtp_headers_add_header (req->headers_out, */\n/*                               evhtp_header_new(\"Access-Control-Allow-Origin\", */\n/*                                                \"*\", 1, 1)); */\n/*     evhtp_headers_add_header (req->headers_out, */\n/*                               
evhtp_header_new(\"Access-Control-Max-Age\", */\n/*                                                \"86400\", 1, 1)); */\n\n/*     if (evhtp_request_get_method(req) == htp_method_OPTIONS) { */\n/*         /\\* If CORS preflight header, then create an empty body response (200 OK) */\n/*          * and return it. */\n/*          *\\/ */\n/*         send_success_reply (req); */\n/*         return; */\n/*     } */\n\n/*     if (!fsm || fsm->state == RECV_ERROR) */\n/*         return; */\n/*     target_file = g_hash_table_lookup (fsm->form_kvs, \"target_file\"); */\n/*     size_str = g_hash_table_lookup (fsm->form_kvs, \"file_size\"); */\n/*     if (size_str)  file_size = atoll(size_str); */\n/*     if (!target_file || !size_str || file_size < 0) { */\n/*         seaf_debug (\"[Update-blks] No target file given.\\n\"); */\n/*         send_error_reply (req, EVHTP_RES_BADREQ, \"Invalid URL.\\n\"); */\n/*         return; */\n/*     } */\n\n/*     parent_dir = g_path_get_dirname (target_file); */\n/*     filename = g_path_get_basename (target_file); */\n\n/*     if (!check_parent_dir (req, fsm->repo_id, parent_dir)) */\n/*         return; */\n\n/*     if (!check_tmp_file_list (fsm->files, &error_code)) */\n/*         goto error; */\n\n/*     head_id = evhtp_kv_find (req->uri->query, \"head\"); */\n\n/*     gint64 content_len = get_content_length (req); */\n/*     if (seaf_quota_manager_check_quota_with_delta (seaf->quota_mgr, */\n/*                                                    fsm->repo_id, */\n/*                                                    content_len) != 0) { */\n/*         error_code = ERROR_QUOTA; */\n/*         goto error; */\n/*     } */\n\n/*     blockids_json = file_list_to_json (fsm->filenames); */\n/*     tmp_files_json = file_list_to_json (fsm->files); */\n/*     int rc = seaf_repo_manager_put_file_blocks (seaf->repo_mgr, */\n/*                                                 fsm->repo_id, */\n/*                                                 
parent_dir, */\n/*                                                 filename, */\n/*                                                 blockids_json, */\n/*                                                 tmp_files_json, */\n/*                                                 fsm->user, */\n/*                                                 head_id, */\n/*                                                 file_size, */\n/*                                                 NULL, */\n/*                                                 &error); */\n/*     g_free (blockids_json); */\n/*     g_free (tmp_files_json); */\n/*     g_free (parent_dir); */\n/*     g_free (filename); */\n\n/*     if (rc < 0) { */\n/*         if (error) { */\n/*             if (g_strcmp0 (error->message, \"file does not exist\") == 0) { */\n/*                 error_code = ERROR_NOT_EXIST; */\n/*             } */\n/*             g_clear_error (&error); */\n/*         } */\n/*         goto error; */\n/*     } */\n\n/*     send_success_reply (req); */\n\n/*     return; */\n\n/* error: */\n/*     switch (error_code) { */\n/*     case ERROR_FILENAME: */\n/*         send_error_reply (req, SEAF_HTTP_RES_BADFILENAME, \"Invalid filename.\\n\"); */\n/*         break; */\n/*     case ERROR_EXISTS: */\n/*         send_error_reply (req, SEAF_HTTP_RES_EXISTS, \"File already exists.\\n\"); */\n/*         break; */\n/*     case ERROR_SIZE: */\n/*         send_error_reply (req, SEAF_HTTP_RES_TOOLARGE, \"File size is too large.\\n\"); */\n/*         break; */\n/*     case ERROR_QUOTA: */\n/*         send_error_reply (req, SEAF_HTTP_RES_NOQUOTA, \"Out of quota.\\n\"); */\n/*         break; */\n/*     case ERROR_NOT_EXIST: */\n/*         send_error_reply (req, SEAF_HTTP_RES_NOT_EXISTS, \"File does not exist.\\n\"); */\n/*         break; */\n/*     case ERROR_RECV: */\n/*     case ERROR_INTERNAL: */\n/*     default: */\n/*         send_error_reply (req, EVHTP_RES_SERVERR, \"Internal error.\\n\"); */\n/*         break; 
*/\n/*     } */\n/* } */\n\nstatic char *\nformat_update_json_ret (const char *filename, const char *file_id, gint64 size)\n{\n    json_t *array, *obj;\n    char *json_data;\n    char *ret;\n\n    array = json_array ();\n\n    obj = json_object ();\n    json_object_set_string_member (obj, \"name\", filename);\n    json_object_set_string_member (obj, \"id\", file_id);\n    json_object_set_int_member (obj, \"size\", size);\n    json_array_append_new (array, obj);\n\n    json_data = json_dumps (array, 0);\n    json_decref (array);\n\n    ret = g_strdup (json_data);\n    free (json_data);\n    return ret;\n}\n\nstatic void\nupdate_ajax_cb(evhtp_request_t *req, void *arg)\n{\n    RecvFSM *fsm = arg;\n    char *target_file, *parent_dir = NULL, *filename = NULL;\n    char *last_modify = NULL;\n    gint64 mtime = 0;\n    const char *head_id = NULL;\n    GError *error = NULL;\n    int error_code = -1;\n    char *new_file_id = NULL;\n    gint64 size;\n\n    evhtp_headers_add_header (req->headers_out,\n                              evhtp_header_new(\"Access-Control-Allow-Headers\",\n                                               \"x-requested-with, content-type, accept, origin, authorization\", 1, 1));\n    evhtp_headers_add_header (req->headers_out,\n                              evhtp_header_new(\"Access-Control-Allow-Methods\",\n                                               \"GET, POST, PUT, PATCH, DELETE, OPTIONS\", 1, 1));\n    evhtp_headers_add_header (req->headers_out,\n                              evhtp_header_new(\"Access-Control-Allow-Origin\",\n                                               \"*\", 1, 1));\n    evhtp_headers_add_header (req->headers_out,\n                              evhtp_header_new(\"Access-Control-Max-Age\",\n                                               \"86400\", 1, 1));\n\n\n    if (evhtp_request_get_method(req) == htp_method_OPTIONS) {\n        /* If CORS preflight header, then create an empty body response (200 OK)\n         * and return 
it.\n         */\n        send_success_reply (req);\n        return;\n    }\n\n    if (!fsm || fsm->state == RECV_ERROR)\n        return;\n\n    if (!fsm->files) {\n        seaf_debug (\"[update] No file uploaded.\\n\");\n        send_error_reply (req, EVHTP_RES_BADREQ, \"No file uploaded.\\n\");\n        return;\n    }\n\n    target_file = g_hash_table_lookup (fsm->form_kvs, \"target_file\");\n    if (!target_file) {\n        seaf_debug (\"[Update] No target file given.\\n\");\n        send_error_reply (req, EVHTP_RES_BADREQ, \"No target file.\");\n        return;\n    }\n\n    last_modify = g_hash_table_lookup (fsm->form_kvs, \"last_modify\");\n    if (last_modify) {\n        mtime = rfc3339_to_timestamp (last_modify);\n    }\n\n    parent_dir = g_path_get_dirname (target_file);\n    filename = g_path_get_basename (target_file);\n\n    if (!check_parent_dir (req, fsm->repo_id, parent_dir))\n        goto out;\n\n    if (!check_tmp_file_list (fsm->files, &error_code))\n        goto out;\n\n    SeafStat st;\n    char *tmp_file_path = fsm->files->data;\n    if (seaf_stat (tmp_file_path, &st) < 0) {\n        seaf_warning (\"Failed to stat tmp file %s.\\n\", tmp_file_path);\n        error_code = ERROR_INTERNAL;\n        goto out;\n    }\n    size = (gint64)st.st_size;\n\n    head_id = evhtp_kv_find (req->uri->query, \"head\");\n\n    gint64 content_len = get_content_length (req);\n    if (seaf_quota_manager_check_quota_with_delta (seaf->quota_mgr,\n                                                   fsm->repo_id,\n                                                   content_len) != 0) {\n        error_code = ERROR_QUOTA;\n        goto out;\n    }\n\n    int rc = seaf_repo_manager_put_file (seaf->repo_mgr,\n                                         fsm->repo_id,\n                                         (char *)(fsm->files->data),\n                                         parent_dir,\n                                         filename,\n                                       
  fsm->user,\n                                         head_id,\n                                         mtime,\n                                         &new_file_id,\n                                         &error);\n\n    if (rc < 0) {\n        error_code = ERROR_INTERNAL;\n        if (error) {\n            if (g_strcmp0 (error->message, \"file does not exist\") == 0) {\n                error_code = ERROR_NOT_EXIST;\n            } else if (error->code == SEAF_ERR_GC_CONFLICT) {\n                error_code = -1;\n                send_error_reply (req, EVHTP_RES_CONFLICT, \"GC Conflict.\\n\");\n            }\n            g_clear_error (&error);\n        }\n        goto out;\n    }\n    send_statistic_msg(fsm->repo_id, fsm->user, \"web-file-upload\", (guint64)content_len);\n\n    char *json_ret = format_update_json_ret (filename, new_file_id, size);\n\n    evbuffer_add (req->buffer_out, json_ret, strlen(json_ret));\n    send_success_reply (req);\n    g_free (json_ret);\n\nout:\n    g_free (parent_dir);\n    g_free (new_file_id);\n    g_free (filename);\n    send_reply_by_error_code (req, error_code);\n\n    return;\n}\n\n/*\nstatic void\nupload_link_cb(evhtp_request_t *req, void *arg)\n{\n    return upload_api_cb (req, arg);\n}\n*/\n\nstatic evhtp_res\nupload_finish_cb (evhtp_request_t *req, void *arg)\n{\n    RecvFSM *fsm = arg;\n    GList *ptr;\n\n    seaf_metric_manager_in_flight_request_dec (seaf->metric_mgr);\n\n    if (!fsm)\n        return EVHTP_RES_OK;\n\n    /* Clean up FSM struct no matter upload succeed or not. 
*/\n\n    g_free (fsm->parent_dir);\n    g_free (fsm->user);\n    g_free (fsm->boundary);\n    g_free (fsm->input_name);\n    g_free (fsm->token_type);\n\n    g_hash_table_destroy (fsm->form_kvs);\n\n    g_free (fsm->file_name);\n    if (fsm->tmp_file) {\n        close (fsm->fd);\n        // For resumable upload, in case tmp file not be deleted\n        if (fsm->rstart >= 0) {\n            g_unlink (fsm->tmp_file);\n        }\n    }\n    g_free (fsm->tmp_file);\n\n    if (fsm->resumable_tmp_file) {\n        if (fsm->rstart >= 0 && fsm->rend == fsm->fsize - 1) {\n            seaf_repo_manager_del_upload_tmp_file (seaf->repo_mgr, fsm->repo_id, fsm->resumable_tmp_file, NULL);\n        }\n        g_free (fsm->resumable_tmp_file);\n    }\n\n    g_free (fsm->repo_id);\n\n    if (!fsm->need_idx_progress) {\n        for (ptr = fsm->files; ptr; ptr = ptr->next)\n            g_unlink ((char *)(ptr->data));\n    }\n    string_list_free (fsm->filenames);\n    string_list_free (fsm->files);\n\n    evbuffer_free (fsm->line);\n\n    if (fsm->progress_id) {\n        pthread_mutex_lock (&pg_lock);\n        g_hash_table_remove (upload_progress, fsm->progress_id);\n        pthread_mutex_unlock (&pg_lock);\n\n        /* fsm->progress has been free'd by g_hash_table_remove(). */\n        g_free (fsm->progress_id);\n    }\n\n    g_free (fsm);\n\n    return EVHTP_RES_OK;\n}\n\nstatic char *\nget_mime_header_param_value (const char *param)\n{\n    char *first_quote, *last_quote;\n    char *value;\n\n    // param may not start with double quotes. 
\n    first_quote = strchr (param, '\\\"');\n    if (!first_quote) {\n        return g_strdup (param);\n    }\n    last_quote = strrchr (param, '\\\"');\n    if (!first_quote || !last_quote || first_quote == last_quote) {\n        seaf_debug (\"[upload] Invalid mime param %s.\\n\", param);\n        return NULL;\n    }\n\n    value = g_strndup (first_quote + 1, last_quote - first_quote - 1);\n    return value;\n}\n\nstatic char *\nparse_file_name_from_header (evhtp_request_t *req)\n{\n    const char *dispose = NULL;\n    char **p;\n    char **params;\n    char *dec_file_name = NULL;\n\n    dispose = evhtp_kv_find (req->headers_in, \"Content-Disposition\");\n    if (!dispose)\n        return NULL;\n\n    params = g_strsplit (dispose, \";\", 2);\n    for (p = params; *p != NULL; ++p)\n        *p = g_strstrip (*p);\n\n    if (g_strv_length (params) != 2 ||\n        strcasecmp (params[0], \"attachment\") != 0 ||\n        strncasecmp (params[1], \"filename\", strlen(\"filename\")) != 0) {\n        seaf_warning (\"[upload] Invalid Content-Disposition header.\\n\");\n        g_strfreev (params);\n        return NULL;\n    }\n\n    char *file_name = get_mime_header_param_value (params[1]);\n    if (file_name)\n        dec_file_name = g_uri_unescape_string (file_name, NULL);\n    g_free (file_name);\n    g_strfreev (params);\n\n    return dec_file_name;\n}\n\nstatic int\nparse_mime_header (evhtp_request_t *req, char *header, RecvFSM *fsm)\n{\n    char *colon;\n    char **params, **p;\n\n    colon = strchr (header, ':');\n    if (!colon) {\n        seaf_debug (\"[upload] bad mime header format.\\n\");\n        return -1;\n    }\n\n    *colon = 0;\n    // Content-Disposition is case-insensitive.\n    if (strcasecmp (header, \"Content-Disposition\") == 0) {\n        params = g_strsplit (colon + 1, \";\", 3);\n        for (p = params; *p != NULL; ++p)\n            *p = g_strstrip (*p);\n\n        if (g_strv_length (params) < 2) {\n            seaf_debug (\"[upload] Too little 
params for mime header.\\n\");\n            g_strfreev (params);\n            return -1;\n        }\n        if (strcasecmp (params[0], \"form-data\") != 0) {\n            seaf_debug (\"[upload] Invalid Content-Disposition\\n\");\n            g_strfreev (params);\n            return -1;\n        }\n\n        for (p = params; *p != NULL; ++p) {\n            if (strncasecmp (*p, \"name\", strlen(\"name\")) == 0) {\n                fsm->input_name = get_mime_header_param_value (*p);\n                break;\n            }\n        }\n        if (!fsm->input_name) {\n            seaf_debug (\"[upload] No input-name given.\\n\");\n            g_strfreev (params);\n            return -1;\n        }\n\n        if (strcmp (fsm->input_name, \"file\") == 0) {\n            char *file_name;\n            for (p = params; *p != NULL; ++p) {\n                if (strncasecmp (*p, \"filename\", strlen(\"filename\")) == 0) {\n                    if (fsm->rstart >= 0) {\n                        file_name = parse_file_name_from_header (req);\n                    } else {\n                        file_name = get_mime_header_param_value (*p);\n                    }\n                    if (file_name) {\n                        fsm->file_name = normalize_utf8_path (file_name);\n                        if (!fsm->file_name)\n                            seaf_debug (\"File name is not valid utf8 encoding.\\n\");\n                        g_free (file_name);\n                    }\n                    break;\n                }\n            }\n            if (!fsm->file_name) {\n                seaf_debug (\"[upload] No filename given.\\n\");\n                g_strfreev (params);\n                return -1;\n            }\n        }\n        g_strfreev (params);\n    }\n\n    return 0;\n}\n\nstatic int\nopen_temp_file (RecvFSM *fsm)\n{\n    GString *temp_file = g_string_new (NULL);\n    char *base_name = get_basename(fsm->file_name);\n\n    g_string_printf (temp_file, \"%s/%sXXXXXX\",\n          
           seaf->http_server->http_temp_dir, base_name);\n    g_free (base_name);\n\n    fsm->fd = g_mkstemp (temp_file->str);\n    if (fsm->fd < 0) {\n        seaf_warning(\"[upload] Failed to open temp file: %s.\\n\", strerror(errno));\n        g_string_free (temp_file, TRUE);\n        return -1;\n    }\n\n    fsm->tmp_file = g_string_free (temp_file, FALSE);\n    /* For clean up later. */\n    if (fsm->rstart < 0) {\n        fsm->files = g_list_prepend (fsm->files, g_strdup(fsm->tmp_file));\n    }\n\n    return 0;\n}\n\nstatic evhtp_res\nrecv_form_field (RecvFSM *fsm, gboolean *no_line)\n{\n    char *line, *norm_line;\n    size_t len;\n\n    *no_line = FALSE;\n\n    line = evbuffer_readln (fsm->line, &len, EVBUFFER_EOL_CRLF_STRICT);\n    if (line != NULL) {\n        if (strstr (line, fsm->boundary) != NULL) {\n            seaf_debug (\"[upload] form field ends.\\n\");\n\n            g_free (fsm->input_name);\n            fsm->input_name = NULL;\n            fsm->state = RECV_HEADERS;\n        } else {\n            seaf_debug (\"[upload] form field is %s.\\n\", line);\n\n            norm_line = normalize_utf8_path (line);\n            if (norm_line) {\n                g_hash_table_insert (fsm->form_kvs,\n                                     g_strdup(fsm->input_name),\n                                     norm_line);\n            }\n        }\n        free (line);\n    } else {\n        size_t size = evbuffer_get_length (fsm->line);\n        if (size >= strlen(fsm->boundary) + 4) {\n            struct evbuffer_ptr search_boundary = evbuffer_search (fsm->line,\n                                                                   fsm->boundary,\n                                                                   strlen(fsm->boundary),\n                                                                   NULL);\n            if (search_boundary.pos != -1) {\n                seaf_debug (\"[upload] form field ends.\\n\");\n                evbuffer_drain (fsm->line, size);\n  
              g_free (fsm->input_name);\n                fsm->input_name = NULL;\n                fsm->state = RECV_HEADERS;\n            }\n        }\n        *no_line = TRUE;\n    }\n\n    return EVHTP_RES_OK;\n}\n\nstatic evhtp_res\nadd_uploaded_file (RecvFSM *fsm)\n{\n    if (fsm->rstart < 0) {\n        // Non breakpoint transfer, same as original\n\n        /* In case of using NFS, the error may only occur in close(). */\n        if (close (fsm->fd) < 0) {\n            seaf_warning (\"[upload] Failed to close temp file: %s\\n\", strerror(errno));\n            return EVHTP_RES_SERVERR;\n        }\n\n        fsm->filenames = g_list_prepend (fsm->filenames,\n                                         get_basename(fsm->file_name));\n\n        g_free (fsm->file_name);\n        g_free (fsm->tmp_file);\n        fsm->file_name = NULL;\n        fsm->tmp_file = NULL;\n        fsm->recved_crlf = FALSE;\n    } else {\n        fsm->filenames = g_list_prepend (fsm->filenames,\n                                         get_basename(fsm->file_name));\n        g_free (fsm->file_name);\n        fsm->file_name = NULL;\n        fsm->recved_crlf = FALSE;\n    }\n\n    return EVHTP_RES_OK;\n}\n\nstatic evhtp_res\nrecv_file_data (RecvFSM *fsm, gboolean *no_line)\n{\n    char *line;\n    size_t len;\n\n    *no_line = FALSE;\n\n    line = evbuffer_readln (fsm->line, &len, EVBUFFER_EOL_CRLF_STRICT);\n    if (!line) {\n        // handle boundary\n        size_t size = evbuffer_get_length (fsm->line);\n        /* If we haven't read an entire line, but the line\n         * buffer gets too long, flush the content to file,\n         * or we reach the last boundary line (without CRLF at the end).\n         * Since the last boundary line starts with \"--\" and ends with \"--\"\n         * we have to add 4 bytes to the boundary size.\n         */\n        if (size >= strlen(fsm->boundary) + 4) {\n            char *buf = g_new0 (char, size + 1);\n            evbuffer_remove (fsm->line, buf, 
size);\n            // strstr need a '\\0'\n            if (strstr(buf, fsm->boundary) != NULL) {\n                seaf_debug (\"[upload] file data ends.\\n\");\n                evhtp_res res = add_uploaded_file (fsm);\n                if (res != EVHTP_RES_OK) {\n                    g_free(buf);\n                    return res;\n                }\n                g_free (fsm->input_name);\n                fsm->input_name = NULL;\n                fsm->state = RECV_HEADERS;\n            } else {\n                seaf_debug (\"[upload] recv file data %d bytes.\\n\", size);\n                if (fsm->recved_crlf) {\n                    if (writen (fsm->fd, \"\\r\\n\", 2) < 0) {\n                        seaf_warning (\"[upload] Failed to write temp file: %s.\\n\",\n                                   strerror(errno));\n                        return EVHTP_RES_SERVERR;\n                    }\n                }\n                if (writen (fsm->fd, buf, size) < 0) {\n                    seaf_warning (\"[upload] Failed to write temp file: %s.\\n\",\n                               strerror(errno));\n                    g_free (buf);\n                    return EVHTP_RES_SERVERR;\n                }\n                fsm->recved_crlf = FALSE;\n            }\n            g_free(buf);\n        }\n        *no_line = TRUE;\n    } else if (strstr (line, fsm->boundary) != NULL) {\n        seaf_debug (\"[upload] file data ends.\\n\");\n\n        evhtp_res res = add_uploaded_file (fsm);\n        if (res != EVHTP_RES_OK) {\n            free (line);\n            return res;\n        }\n\n        g_free (fsm->input_name);\n        fsm->input_name = NULL;\n        fsm->state = RECV_HEADERS;\n        free (line);\n    } else {\n        seaf_debug (\"[upload] recv file data %d bytes.\\n\", len + 2);\n        if (fsm->recved_crlf) {\n            if (writen (fsm->fd, \"\\r\\n\", 2) < 0) {\n                seaf_warning (\"[upload] Failed to write temp file: %s.\\n\",\n                           
strerror(errno));\n                return EVHTP_RES_SERVERR;\n            }\n        }\n        if (writen (fsm->fd, line, len) < 0) {\n            seaf_warning (\"[upload] Failed to write temp file: %s.\\n\",\n                       strerror(errno));\n            free (line);\n            return EVHTP_RES_SERVERR;\n        }\n        free (line);\n        fsm->recved_crlf = TRUE;\n    }\n\n    return EVHTP_RES_OK;\n}\n\n/*\n   Refer to https://www.w3.org/Protocols/rfc1341/7_2_Multipart.html\n   and https://tools.ietf.org/html/rfc7578\n   Example multipart form-data request content format:\n\n   --AaB03x\n   Content-Disposition: form-data; name=\"submit-name\"\n\n   Larry\n   --AaB03x\n   Content-Disposition: form-data; name=\"file\"; filename=\"file1.txt\"\n   Content-Type: text/plain\n\n   ... contents of file1.txt ...\n   --AaB03x--\n*/\nstatic evhtp_res\nupload_read_cb (evhtp_request_t *req, evbuf_t *buf, void *arg)\n{\n    RecvFSM *fsm = arg;\n    char *line;\n    size_t len;\n    gboolean no_line = FALSE;\n    int res = EVHTP_RES_OK;\n\n    if (fsm->state == RECV_ERROR)\n        return EVHTP_RES_OK;\n\n    /* Update upload progress. 
*/\n    if (fsm->progress) {\n        fsm->progress->uploaded += (gint64)evbuffer_get_length(buf);\n\n        seaf_debug (\"progress: %lld/%lld\\n\",\n                    fsm->progress->uploaded, fsm->progress->size);\n    }\n\n    evbuffer_add_buffer (fsm->line, buf);\n    /* Drain the buffer so that evhtp don't copy it to another buffer\n     * after this callback returns.\n     */\n    evbuffer_drain (buf, evbuffer_get_length (buf));\n\n    while (!no_line) {\n        switch (fsm->state) {\n        case RECV_INIT:\n            line = evbuffer_readln (fsm->line, &len, EVBUFFER_EOL_CRLF_STRICT);\n            if (line != NULL) {\n                seaf_debug (\"[upload] boundary line: %s.\\n\", line);\n                if (!strstr (line, fsm->boundary)) {\n                    seaf_debug (\"[upload] no boundary found in the first line.\\n\");\n                    free (line);\n                    res = EVHTP_RES_BADREQ;\n                    goto out;\n                } else {\n                    fsm->state = RECV_HEADERS;\n                    free (line);\n                }\n            } else {\n                no_line = TRUE;\n            }\n            break;\n        case RECV_HEADERS:\n            line = evbuffer_readln (fsm->line, &len, EVBUFFER_EOL_CRLF_STRICT);\n            if (line != NULL) {\n                seaf_debug (\"[upload] mime header line: %s.\\n\", line);\n                if (len == 0) {\n                    /* Read an blank line, headers end. 
*/\n                    free (line);\n                    // Each part MUST contain a Content-Disposition header field\n                    if (!fsm->input_name) {\n                        res = EVHTP_RES_BADREQ;\n                        goto out;\n                    }\n                    if (g_strcmp0 (fsm->input_name, \"file\") == 0) {\n                        if (open_temp_file (fsm) < 0) {\n                            seaf_warning (\"[upload] Failed open temp file, errno:[%d]\\n\", errno);\n                            res = EVHTP_RES_SERVERR;\n                            goto out;\n                        }\n                    }\n                    seaf_debug (\"[upload] Start to recv %s.\\n\", fsm->input_name);\n                    fsm->state = RECV_CONTENT;\n                } else if (parse_mime_header (req, line, fsm) < 0) {\n                    free (line);\n                    res = EVHTP_RES_BADREQ;\n                    goto out;\n                } else {\n                    free (line);\n                }\n            } else {\n                no_line = TRUE;\n            }\n            break;\n        case RECV_CONTENT:\n            if (g_strcmp0 (fsm->input_name, \"file\") == 0)\n                res = recv_file_data (fsm, &no_line);\n            else\n                res = recv_form_field (fsm, &no_line);\n\n            if (res != EVHTP_RES_OK)\n                goto out;\n\n            break;\n        }\n    }\n\nout:\n    if (res != EVHTP_RES_OK) {\n        /* Don't receive any data before the connection is closed. */\n        //evhtp_request_pause (req);\n\n        /* Set keepalive to 0. 
This will cause evhtp to close the\n         * connection after sending the reply.\n         */\n        req->keepalive = 0;\n\n        fsm->state = RECV_ERROR;\n    }\n\n    if (res == EVHTP_RES_BADREQ) {\n        send_error_reply (req, EVHTP_RES_BADREQ, \"Bad request.\\n\");\n    } else if (res == EVHTP_RES_SERVERR) {\n        send_error_reply (req, EVHTP_RES_SERVERR, \"Internal server error\\n\");\n    }\n    return EVHTP_RES_OK;\n}\n\nstatic char *\nget_http_header_param_value (const char *param)\n{\n    char *equal;\n    char *value;\n\n    equal = strchr (param, '=');\n    if (!equal) {\n        seaf_debug (\"[upload] Invalid http header param %s.\\n\", param);\n        return NULL;\n    }\n\n    value = g_strdup (equal + 1);\n    return value;\n}\n\nstatic char *\nget_boundary (evhtp_headers_t *hdr)\n{\n    const char *content_type;\n    char **params, **p;\n    char *boundary = NULL;\n\n    content_type = evhtp_kv_find (hdr, \"Content-Type\");\n    if (!content_type) {\n        seaf_debug (\"[upload] Missing Content-Type header\\n\");\n        return boundary;\n    }\n\n    params = g_strsplit (content_type, \";\", 0);\n    for (p = params; *p != NULL; ++p)\n        *p = g_strstrip (*p);\n\n    if (!params || g_strv_length (params) < 2) {\n        seaf_debug (\"[upload] Too little params Content-Type header\\n\");\n        g_strfreev (params);\n        return boundary;\n    }\n    if (strcasecmp (params[0], \"multipart/form-data\") != 0) {\n        seaf_debug (\"[upload] Invalid Content-Type\\n\");\n        g_strfreev (params);\n        return boundary;\n    }\n\n    for (p = params; *p != NULL; ++p) {\n        if (strncasecmp (*p, \"boundary\", strlen(\"boundary\")) == 0) {\n            boundary = get_http_header_param_value (*p);\n            break;\n        }\n    }\n    g_strfreev (params);\n    if (!boundary) {\n        seaf_debug (\"[upload] boundary not given\\n\");\n    }\n\n    return boundary;\n}\n\nstatic int\ncheck_access_token (const char 
*token,\n                    const char *url_op,\n                    char **repo_id,\n                    char **parent_dir,\n                    char **user,\n                    char **token_type,\n                    char **err_msg)\n{\n    SeafileWebAccess *webaccess;\n    const char *op;\n    const char *_repo_id;\n    const char *_obj_id;\n    const char *_parent_dir;\n    json_t *parent_dir_json;\n\n    webaccess = (SeafileWebAccess *)\n        seaf_web_at_manager_query_access_token (seaf->web_at_mgr, token);\n    if (!webaccess) {\n        *err_msg = \"Access token not found.\";\n        return -1;\n    }\n\n    _repo_id = seafile_web_access_get_repo_id (webaccess);\n    int status = seaf_repo_manager_get_repo_status(seaf->repo_mgr, _repo_id);\n    if (status != REPO_STATUS_NORMAL && status != -1) {\n        *err_msg = \"Repo status not writable.\";\n        g_object_unref (webaccess);\n        return -1;\n    }\n\n    /* token with op = \"upload\" can only be used for \"upload-*\" operations;\n     * token with op = \"update\" can only be used for \"update-*\" operations.\n     */\n    op = seafile_web_access_get_op (webaccess);\n    if (token_type)\n        *token_type = g_strdup (op);\n\n    if (g_strcmp0(op, \"upload-link\") == 0)\n        op = \"upload\";\n\n    if (strncmp (url_op, op, strlen(op)) != 0) {\n        *err_msg = \"Operation does not match access token.\";\n        g_object_unref (webaccess);\n        return -1;\n    }\n\n    *repo_id = g_strdup (_repo_id);\n    *user = g_strdup (seafile_web_access_get_username (webaccess));\n\n    _obj_id  = seafile_web_access_get_obj_id (webaccess);\n    parent_dir_json = json_loadb (_obj_id, strlen (_obj_id), 0, NULL);\n    if (parent_dir_json) {\n        _parent_dir = json_object_get_string_member (parent_dir_json, \"parent_dir\");\n        \n        if (_parent_dir){\n            *parent_dir = g_strdup(_parent_dir);\n        }\n        json_decref (parent_dir_json);\n    }\n\n    g_object_unref 
(webaccess);\n\n    return 0;\n}\n\nstatic gboolean\nparse_range_val (evhtp_headers_t *hdr, gint64 *rstart,\n                 gint64 *rend, gint64 *rfsize)\n{\n    const char *tmp = evhtp_kv_find (hdr, \"Content-Range\");\n    if (!tmp)\n        return TRUE;\n\n    char *next = NULL;\n    gint64 start;\n    gint64 end;\n    gint64 fsize;\n\n    if (strstr (tmp, \"bytes\") != tmp) {\n        return FALSE;\n    }\n\n    tmp += strlen(\"bytes\");\n    while (tmp && *tmp == ' ') {\n        tmp++;\n    }\n\n    start = strtoll (tmp, &next, 10);\n    if ((start == 0 && next == tmp) || *next != '-') {\n        return FALSE;\n    }\n\n    tmp = next + 1;\n    end = strtoll (tmp, &next, 10);\n    if ((end == 0 && next == tmp) || *next != '/') {\n        return FALSE;\n    }\n\n    tmp = next + 1;\n    fsize = strtoll (tmp, &next, 10);\n    if ((fsize == 0 && next == tmp) || *next != '\\0') {\n        return FALSE;\n    }\n\n    if (start > end || end >= fsize) {\n        return FALSE;\n    }\n\n    *rstart = start;\n    *rend = end;\n    *rfsize = fsize;\n\n    return TRUE;\n}\n\nstatic int\nget_progress_info (evhtp_request_t *req,\n                   evhtp_headers_t *hdr,\n                   gint64 *content_len,\n                   char **progress_id)\n{\n    const char *content_len_str;\n    const char *uuid;\n\n    uuid = evhtp_kv_find (req->uri->query, \"X-Progress-ID\");\n    /* If progress id is not given, we don't need content-length either. 
*/\n    if (!uuid)\n        return 0;\n    *progress_id = g_strdup(uuid);\n\n    content_len_str = evhtp_kv_find (hdr, \"Content-Length\");\n    if (!content_len_str) {\n        seaf_debug (\"[upload] Content-Length not found.\\n\");\n        return -1;\n    }\n    *content_len = strtoll (content_len_str, NULL, 10);\n\n    return 0;\n}\n\nstatic evhtp_res\nupload_headers_cb (evhtp_request_t *req, evhtp_headers_t *hdr, void *arg)\n{\n    char **parts = NULL;\n    char *token, *repo_id = NULL, *user = NULL;\n    char *parent_dir = NULL;\n    char *boundary = NULL;\n    gint64 content_len;\n    char *progress_id = NULL;\n    char *err_msg = NULL;\n    char *token_type = NULL;\n    RecvFSM *fsm = NULL;\n    Progress *progress = NULL;\n    int error_code = EVHTP_RES_BADREQ;\n    htp_method method = evhtp_request_get_method(req);\n\n    if (method == htp_method_OPTIONS) {\n         return EVHTP_RES_OK;\n    }\n\n    /* URL format: http://host:port/[upload|update]/<token>?X-Progress-ID=<uuid> */\n    token = req->uri->path->file;\n    if (!token) {\n        seaf_debug (\"[upload] No token in url.\\n\");\n        err_msg = \"No token in url\";\n        goto err;\n    }\n\n    parts = g_strsplit (req->uri->path->full + 1, \"/\", 0);\n    if (!parts || g_strv_length (parts) < 2) {\n        err_msg = \"Invalid URL\";\n        goto err;\n    }\n    char *url_op = parts[0];\n\n    if (check_access_token (token, url_op, &repo_id, &parent_dir, &user, &token_type, &err_msg) < 0) {\n        error_code = EVHTP_RES_FORBIDDEN;\n        goto err;\n    }\n\n    gint64 rstart = -1;\n    gint64 rend = -1;\n    gint64 fsize = -1;\n    if (!parse_range_val (hdr, &rstart, &rend, &fsize)) {\n        seaf_warning (\"Invalid Seafile-Content-Range value.\\n\");\n        err_msg = \"Invalid Seafile-Content-Range\";\n        goto err;\n    }\n\n    if (method == htp_method_POST || method == htp_method_PUT) {\n        gint64 content_len = get_content_length (req);\n        if (fsize > 0) {\n        
    content_len = fsize;\n        }\n        // Check whether the file to be uploaded would exceed the quota before receiving the body, in order to avoid unnecessarily receiving the body.\n        // After receiving the body, the quota is checked again to handle cases where the Content-Length in the request header is missing, which could make the initial quota check inaccurate.\n        if (seaf_quota_manager_check_quota_with_delta (seaf->quota_mgr,\n                                                       repo_id,\n                                                       content_len) != 0) {\n            error_code = SEAF_HTTP_RES_NOQUOTA;\n            err_msg = \"Out of quota.\\n\";\n            goto err;\n        }\n\n        if (seaf->max_upload_size > 0 && content_len > seaf->max_upload_size) {\n            error_code = SEAF_HTTP_RES_TOOLARGE;\n            err_msg = \"File size is too large.\\n\";\n            goto err;\n        }\n    }\n\n    boundary = get_boundary (hdr);\n    if (!boundary) {\n        err_msg = \"Wrong boundary in url\";\n        goto err;\n    }\n\n    if (get_progress_info (req, hdr, &content_len, &progress_id) < 0) {\n        err_msg = \"No progress info\";\n        goto err;\n    }\n\n    if (progress_id != NULL) {\n        pthread_mutex_lock (&pg_lock);\n        if (g_hash_table_lookup (upload_progress, progress_id)) {\n            pthread_mutex_unlock (&pg_lock);\n            err_msg = \"Duplicate progress id.\\n\";\n            goto err;\n        }\n        pthread_mutex_unlock (&pg_lock);\n    }\n\n    fsm = g_new0 (RecvFSM, 1);\n    fsm->boundary = boundary;\n    fsm->repo_id = repo_id;\n    fsm->parent_dir = parent_dir;\n    fsm->user = user;\n    fsm->token_type = token_type;\n    fsm->rstart = rstart;\n    fsm->rend = rend;\n    fsm->fsize = fsize;\n    fsm->line = evbuffer_new ();\n    fsm->form_kvs = g_hash_table_new_full (g_str_hash, g_str_equal,\n                                           g_free, g_free);\n    /* const char 
*need_idx_progress = evhtp_kv_find (req->uri->query, \"need_idx_progress\"); */\n    /* if (g_strcmp0(need_idx_progress, \"true\") == 0) */\n    /*     fsm->need_idx_progress = TRUE; */\n    fsm->need_idx_progress = FALSE;\n\n    if (progress_id != NULL) {\n        progress = g_new0 (Progress, 1);\n        progress->size = content_len;\n        fsm->progress_id = progress_id;\n        fsm->progress = progress;\n\n        pthread_mutex_lock (&pg_lock);\n        g_hash_table_insert (upload_progress, g_strdup(progress_id), progress);\n        pthread_mutex_unlock (&pg_lock);\n    }\n\n    seaf_metric_manager_in_flight_request_inc (seaf->metric_mgr);\n\n    /* Set up per-request hooks, so that we can read file data piece by piece. */\n    evhtp_set_hook (&req->hooks, evhtp_hook_on_read, upload_read_cb, fsm);\n    evhtp_set_hook (&req->hooks, evhtp_hook_on_request_fini, upload_finish_cb, fsm);\n    /* Set arg for upload_cb or update_cb. */\n    req->cbarg = fsm;\n\n    g_strfreev (parts);\n\n    return EVHTP_RES_OK;\n\nerr:\n    /* Don't receive any data before the connection is closed. */\n    //evhtp_request_pause (req);\n\n    /* Set keepalive to 0. 
This will cause evhtp to close the\n     * connection after sending the reply.\n     */\n    req->keepalive = 0;\n    send_error_reply (req, error_code, err_msg);\n\n    g_free (repo_id);\n    g_free (user);\n    g_free (boundary);\n    g_free (token_type);\n    g_free (progress_id);\n    g_strfreev (parts);\n    return EVHTP_RES_OK;\n}\n\n/*\nstatic evhtp_res\nupload_link_headers_cb (evhtp_request_t *req, evhtp_headers_t *hdr, void *arg)\n{\n    char **parts = NULL;\n    char *token = NULL;\n    const char *repo_id = NULL, *parent_dir = NULL;\n    char *r_parent_dir = NULL;\n    char *norm_parent_dir = NULL;\n    char *user = NULL;\n    char *boundary = NULL;\n    gint64 content_len;\n    char *progress_id = NULL;\n    char *err_msg = NULL;\n    RecvFSM *fsm = NULL;\n    Progress *progress = NULL;\n    int error_code = EVHTP_RES_BADREQ;\n    SeafileShareLinkInfo *info = NULL;\n\n    if (!seaf->seahub_pk) {\n        seaf_warning (\"No seahub private key is configured.\\n\");\n        return EVHTP_RES_NOTFOUND;\n    }\n\n    if (evhtp_request_get_method(req) == htp_method_OPTIONS) {\n         return EVHTP_RES_OK;\n    }\n\n    token = req->uri->path->file;\n    if (!token) {\n        seaf_debug (\"[upload] No token in url.\\n\");\n        err_msg = \"No token in url\";\n        goto err;\n    }\n\n    parts = g_strsplit (req->uri->path->full + 1, \"/\", 0);\n    if (!parts || g_strv_length (parts) < 2) {\n        err_msg = \"Invalid URL\";\n        goto err;\n    }\n\n    info = http_tx_manager_query_access_token (token, \"upload\");\n    if (!info) {\n        err_msg = \"Access token not found\\n\";\n        error_code = EVHTP_RES_FORBIDDEN;\n        goto err;\n    }\n    repo_id = seafile_share_link_info_get_repo_id (info);\n    parent_dir = seafile_share_link_info_get_parent_dir (info);\n    if (!parent_dir) {\n        err_msg = \"No parent_dir\\n\";\n        goto err;\n    }\n    norm_parent_dir = normalize_utf8_path (parent_dir); \n    r_parent_dir = 
format_dir_path (norm_parent_dir);\n\n    user = seaf_repo_manager_get_repo_owner (seaf->repo_mgr, repo_id);\n\n    boundary = get_boundary (hdr);\n    if (!boundary) {\n        err_msg = \"Wrong boundary in url\";\n        goto err;\n    }\n\n    if (get_progress_info (req, hdr, &content_len, &progress_id) < 0) {\n        err_msg = \"No progress info\";\n        goto err;\n    }\n\n    if (progress_id != NULL) {\n        pthread_mutex_lock (&pg_lock);\n        if (g_hash_table_lookup (upload_progress, progress_id)) {\n            pthread_mutex_unlock (&pg_lock);\n            err_msg = \"Duplicate progress id.\\n\";\n            goto err;\n        }\n        pthread_mutex_unlock (&pg_lock);\n    }\n\n    gint64 rstart = -1;\n    gint64 rend = -1;\n    gint64 fsize = -1;\n    if (!parse_range_val (hdr, &rstart, &rend, &fsize)) {\n        seaf_warning (\"Invalid Seafile-Content-Range value.\\n\");\n        err_msg = \"Invalid Seafile-Content-Range\";\n        goto err;\n    }\n\n    fsm = g_new0 (RecvFSM, 1);\n    fsm->boundary = boundary;\n    fsm->repo_id = g_strdup (repo_id);\n    fsm->parent_dir = r_parent_dir;\n    fsm->user = user;\n    fsm->token_type = \"upload-link\";\n    fsm->rstart = rstart;\n    fsm->rend = rend;\n    fsm->fsize = fsize;\n    fsm->line = evbuffer_new ();\n    fsm->form_kvs = g_hash_table_new_full (g_str_hash, g_str_equal,\n                                           g_free, g_free);\n    // const char *need_idx_progress = evhtp_kv_find (req->uri->query, \"need_idx_progress\");\n    // if (g_strcmp0(need_idx_progress, \"true\") == 0) \n    //     fsm->need_idx_progress = TRUE; \n    fsm->need_idx_progress = FALSE;\n\n    if (progress_id != NULL) {\n        progress = g_new0 (Progress, 1);\n        progress->size = content_len;\n        fsm->progress_id = progress_id;\n        fsm->progress = progress;\n\n        pthread_mutex_lock (&pg_lock);\n        g_hash_table_insert (upload_progress, g_strdup(progress_id), progress);\n        
pthread_mutex_unlock (&pg_lock);\n    }\n\n    // Set up per-request hooks, so that we can read file data piece by piece.\n    evhtp_set_hook (&req->hooks, evhtp_hook_on_read, upload_read_cb, fsm);\n    evhtp_set_hook (&req->hooks, evhtp_hook_on_request_fini, upload_finish_cb, fsm);\n    // Set arg for upload_cb or update_cb.\n    req->cbarg = fsm;\n\n    g_free (norm_parent_dir);\n    g_strfreev (parts);\n    g_object_unref (info);\n\n    return EVHTP_RES_OK;\n\nerr:\n    // Don't receive any data before the connection is closed.\n    // evhtp_request_pause (req);\n\n    // Set keepalive to 0. This will cause evhtp to close the\n    // connection after sending the reply.\n    req->keepalive = 0;\n    send_error_reply (req, error_code, err_msg);\n\n    g_free (norm_parent_dir);\n    g_free (r_parent_dir);\n    g_free (user);\n    g_free (boundary);\n    g_free (progress_id);\n    g_strfreev (parts);\n    if (info)\n        g_object_unref (info);\n    return EVHTP_RES_OK;\n}\n*/\n\nstatic void\nidx_progress_cb(evhtp_request_t *req, void *arg)\n{\n    const char *progress_id;\n\n    progress_id = evhtp_kv_find (req->uri->query, \"task_id\");\n    if (!progress_id) {\n        seaf_debug (\"[get pg] Index task id not found in url.\\n\");\n        send_error_reply (req, EVHTP_RES_BADREQ, \"task id not found\");\n        return;\n    }\n    char *progress_info = index_blocks_mgr_query_progress (seaf->index_blocks_mgr,\n                                                           progress_id, NULL);\n    if (!progress_info) {\n        send_error_reply (req, EVHTP_RES_NOTFOUND, \"Failed to get index progress\");\n        return;\n    }\n    evbuffer_add (req->buffer_out, progress_info, strlen(progress_info));\n    send_success_reply (req);\n\n    g_free (progress_info);\n}\n\nstatic void\nupload_progress_cb(evhtp_request_t *req, void *arg)\n{\n    const char *progress_id;\n    const char *callback;\n    Progress *progress;\n    GString *buf;\n\n    progress_id = 
evhtp_kv_find (req->uri->query, \"X-Progress-ID\");\n    if (!progress_id) {\n        seaf_debug (\"[get pg] Progress id not found in url.\\n\");\n        send_error_reply (req, EVHTP_RES_BADREQ, \"Progress id not found\");\n        return;\n    }\n\n    callback = evhtp_kv_find (req->uri->query, \"callback\");\n    if (!callback) {\n        seaf_debug (\"[get pg] callback not found in url.\\n\");\n        send_error_reply (req, EVHTP_RES_BADREQ, \"Callback not found\");\n        return;\n    }\n\n    pthread_mutex_lock (&pg_lock);\n    progress = g_hash_table_lookup (upload_progress, progress_id);\n    pthread_mutex_unlock (&pg_lock);\n\n    if (!progress) {\n        /* seaf_warning (\"[get pg] No progress found for %s.\\n\", progress_id); */\n        send_error_reply (req, EVHTP_RES_BADREQ, \"No progress found.\\n\");\n        return;\n    }\n\n    /* Return JSONP formated data. */\n    buf = g_string_new (NULL);\n    g_string_append_printf (buf,\n                            \"%s({\\\"uploaded\\\": %\"G_GINT64_FORMAT\", \\\"length\\\": %\"G_GINT64_FORMAT\"});\",\n                            callback, progress->uploaded, progress->size);\n    evbuffer_add (req->buffer_out, buf->str, buf->len);\n\n    seaf_debug (\"JSONP: %s\\n\", buf->str);\n\n    send_success_reply (req);\n    g_string_free (buf, TRUE);\n}\n\nint\nupload_file_init (evhtp_t *htp, const char *http_temp_dir)\n{\n    evhtp_callback_t *cb;\n\n    if (g_mkdir_with_parents (http_temp_dir, 0777) < 0) {\n        seaf_warning (\"Failed to create temp file dir %s.\\n\",\n                      http_temp_dir);\n        return -1;\n    }\n\n    char *cluster_shared_dir = g_strdup_printf (\"%s/cluster-shared\", http_temp_dir);\n    if (g_mkdir_with_parents (cluster_shared_dir, 0777) < 0) {\n        seaf_warning (\"Failed to create cluster shared dir %s.\\n\",\n                cluster_shared_dir);\n        g_free (cluster_shared_dir);\n        return -1;\n    }\n    g_free (cluster_shared_dir);\n\n    cb = 
evhtp_set_regex_cb (htp, \"^/upload-api/.*\", upload_api_cb, NULL);\n    evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, upload_headers_cb, NULL);\n\n    cb = evhtp_set_regex_cb (htp, \"^/upload-raw-blks-api/.*\",\n                             upload_raw_blks_api_cb, NULL);\n    evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, upload_headers_cb, NULL);\n\n    cb = evhtp_set_regex_cb (htp, \"^/upload-blks-api/.*\", upload_blks_api_cb, NULL);\n    evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, upload_headers_cb, NULL);\n\n    /* cb = evhtp_set_regex_cb (htp, \"^/upload-blks-aj/.*\", upload_blks_ajax_cb, NULL); */\n    /* evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, upload_headers_cb, NULL); */\n\n    cb = evhtp_set_regex_cb (htp, \"^/upload-aj/.*\", upload_ajax_cb, NULL);\n    evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, upload_headers_cb, NULL);\n\n    cb = evhtp_set_regex_cb (htp, \"^/update-api/.*\", update_api_cb, NULL);\n    evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, upload_headers_cb, NULL);\n\n    cb = evhtp_set_regex_cb (htp, \"^/update-blks-api/.*\", update_blks_api_cb, NULL);\n    evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, upload_headers_cb, NULL);\n\n    /* cb = evhtp_set_regex_cb (htp, \"^/update-blks-aj/.*\", update_blks_ajax_cb, NULL); */\n    /* evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, upload_headers_cb, NULL); */\n\n    cb = evhtp_set_regex_cb (htp, \"^/update-aj/.*\", update_ajax_cb, NULL);\n    evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, upload_headers_cb, NULL);\n\n    // upload links\n    // cb = evhtp_set_regex_cb (htp, \"^/u/.*\", upload_link_cb, NULL);\n    //evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, upload_link_headers_cb, NULL);\n\n    evhtp_set_regex_cb (htp, \"^/upload_progress.*\", upload_progress_cb, NULL);\n\n    evhtp_set_regex_cb (htp, \"^/idx_progress.*\", idx_progress_cb, NULL);\n\n    upload_progress = g_hash_table_new_full (g_str_hash, g_str_equal,\n                               
              g_free, g_free);\n    pthread_mutex_init (&pg_lock, NULL);\n\n    return 0;\n}\n#endif\n"
  },
  {
    "path": "server/upload-file.h",
    "content": "#ifndef UPLOAD_FILE_H\n#define UPLOAD_FILE_H\n\n#ifdef HAVE_EVHTP\nint\nupload_file_init (evhtp_t *evhtp, const char *http_temp_dir);\n#endif\n\n#endif\n"
  },
  {
    "path": "server/virtual-repo.c",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#include \"common.h\"\n\n#include \"utils.h\"\n\n#define DEBUG_FLAG SEAFILE_DEBUG_OTHER\n#include \"log.h\"\n\n#include <timer.h>\n#include <pthread.h>\n\n#include \"seafile-session.h\"\n#include \"commit-mgr.h\"\n#include \"branch-mgr.h\"\n#include \"repo-mgr.h\"\n#include \"fs-mgr.h\"\n#include \"seafile-error.h\"\n#include \"seafile-crypt.h\"\n#include \"merge-new.h\"\n#include \"seafile-error.h\"\n\n#include \"seaf-db.h\"\n#include \"diff-simple.h\"\n\n#define MAX_RUNNING_TASKS 5\n#define SCHEDULE_INTERVAL 1000  /* 1s */\n\ntypedef struct MergeTask {\n    char repo_id[37];\n} MergeTask;\n\ntypedef struct MergeScheduler {\n    pthread_mutex_t q_lock;\n    GQueue *queue;\n    GHashTable *running;\n    CcnetJobManager *tpool;\n    CcnetTimer *timer;\n} MergeScheduler;\n\nstatic MergeScheduler *scheduler = NULL;\n\nstatic void\nadd_merge_task (const char *repo_id);\n\nstatic int\nsave_virtual_repo_info (SeafRepoManager *mgr,\n                        const char *repo_id,\n                        const char *origin_repo_id,\n                        const char *path,\n                        const char *base_commit)\n{\n    int ret = 0;\n\n    if (seaf_db_statement_query (mgr->seaf->db,\n                       \"INSERT INTO VirtualRepo (repo_id, origin_repo, path, base_commit) VALUES (?, ?, ?, ?)\",\n                       4, \"string\", repo_id, \"string\", origin_repo_id,\n                       \"string\", path, \"string\", base_commit) < 0)\n        ret = -1;\n\n    return ret;\n}\n\nstatic int\ndo_create_virtual_repo (SeafRepoManager *mgr,\n                        SeafRepo *origin_repo,\n                        const char *repo_id,\n                        const char *repo_name,\n                        const char *repo_desc,\n                        const char *root_id,\n                        const char *user,\n                        const char *passwd,\n          
              GError **error)\n{\n    SeafRepo *repo = NULL;\n    SeafCommit *commit = NULL;\n    SeafBranch *master = NULL;\n    int ret = 0;\n\n    repo = seaf_repo_new (repo_id, repo_name, repo_desc);\n\n    repo->no_local_history = TRUE;\n    if (passwd != NULL && passwd[0] != '\\0') {\n        repo->encrypted = TRUE;\n        repo->enc_version = origin_repo->enc_version;\n        if (repo->enc_version >= 3)\n            memcpy (repo->salt, origin_repo->salt, 64);\n            if (origin_repo->pwd_hash_algo)\n                repo->pwd_hash_algo = g_strdup (origin_repo->pwd_hash_algo);\n            if (origin_repo->pwd_hash_params)\n                repo->pwd_hash_params = g_strdup (origin_repo->pwd_hash_params);\n            if (repo->pwd_hash_algo) {\n                seafile_generate_pwd_hash (repo->enc_version, repo_id, passwd, repo->salt,\n                                           repo->pwd_hash_algo, repo->pwd_hash_params, repo->pwd_hash);\n                memcpy (repo->magic, repo->pwd_hash, 32);\n            } else\n                seafile_generate_magic (repo->enc_version, repo_id, passwd, repo->salt,\n                                        repo->magic);\n        if (repo->enc_version >= 2)\n            memcpy (repo->random_key, origin_repo->random_key, 96);\n    }\n\n    /* Virtual repos share fs and block store with origin repo and\n     * have the same version as the origin.\n     */\n    repo->version = origin_repo->version;\n    memcpy (repo->store_id, origin_repo->id, 36);\n\n    commit = seaf_commit_new (NULL, repo->id,\n                              root_id, /* root id */\n                              user, /* creator */\n                              EMPTY_SHA1, /* creator id */\n                              repo_desc,  /* description */\n                              0);         /* ctime */\n\n    seaf_repo_to_commit (repo, commit);\n    if (seaf_commit_manager_add_commit (seaf->commit_mgr, commit) < 0) {\n        seaf_warning (\"Failed to 
add commit.\\n\");\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to add commit\");\n        ret = -1;\n        goto out;\n    }\n\n    master = seaf_branch_new (\"master\", repo->id, commit->commit_id);\n    if (seaf_branch_manager_add_branch (seaf->branch_mgr, master) < 0) {\n        seaf_warning (\"Failed to add branch.\\n\");\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to add branch\");\n        ret = -1;\n        goto out;\n    }\n\n    if (seaf_repo_set_head (repo, master) < 0) {\n        seaf_warning (\"Failed to set repo head.\\n\");\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to set repo head.\");\n        ret = -1;\n        goto out;\n    }\n\n    if (seaf_repo_manager_add_repo (mgr, repo) < 0) {\n        seaf_warning (\"Failed to add repo.\\n\");\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to add repo.\");\n        ret = -1;\n        goto out;\n    }\n\n    if (set_repo_commit_to_db (repo_id, repo_name, commit->ctime,\n                               repo->version, repo->encrypted, user) < 0) {\n        seaf_warning(\"Failed to add repo info.\\n\");\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to add repo info\");\n        ret = -1;\n        goto out;\n    }\n\nout:\n    if (repo)\n        seaf_repo_unref (repo);\n    if (commit)\n        seaf_commit_unref (commit);\n    if (master)\n        seaf_branch_unref (master);\n    \n    return ret;    \n}\n\nstatic void\nupdate_repo_size(const char *repo_id)\n{\n    schedule_repo_size_computation (seaf->size_sched, repo_id);\n}\n\nstatic char *\nget_existing_virtual_repo (SeafRepoManager *mgr,\n                           const char *origin_repo_id,\n                           const char *path)\n{\n    char *sql = \"SELECT repo_id FROM VirtualRepo WHERE origin_repo = ? 
AND path = ?\";\n\n    return seaf_db_statement_get_string (mgr->seaf->db, sql, 2,\n                                         \"string\", origin_repo_id, \"string\", path);\n}\n\nstatic char *\ncreate_virtual_repo_common (SeafRepoManager *mgr,\n                            const char *origin_repo_id,\n                            const char *path,\n                            const char *repo_name,\n                            const char *repo_desc,\n                            const char *owner,\n                            const char *passwd,\n                            GError **error)\n{\n    SeafRepo *origin_repo = NULL;\n    SeafCommit *origin_head = NULL;\n    char *repo_id = NULL;\n    char *dir_id = NULL;\n\n    origin_repo = seaf_repo_manager_get_repo (mgr, origin_repo_id);\n    if (!origin_repo) {\n        seaf_warning (\"Failed to get origin repo %.10s\\n\", origin_repo_id);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Origin library not exists\");\n        return NULL;\n    }\n    if (origin_repo->status != REPO_STATUS_NORMAL) {\n        seaf_warning(\"Status of repo %.8s is %d, can't create VirtualRepo\\n\",\n                     origin_repo_id, origin_repo->status);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Unnormal repo status\");\n        seaf_repo_unref (origin_repo);\n        return NULL;\n    }\n\n    if (origin_repo->encrypted) {\n        if (origin_repo->enc_version < 2) {\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                         \"Library encryption version must be higher than 2\");\n            seaf_repo_unref (origin_repo);\n            return NULL;\n        }\n\n        if (!passwd || passwd[0] == 0) {\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS,\n                         \"Password is not set\");\n            seaf_repo_unref (origin_repo);\n            return NULL;\n        }\n\n        if 
(origin_repo->pwd_hash_algo) {\n            if (seafile_pwd_hash_verify_repo_passwd (origin_repo->enc_version,\n                                                     origin_repo_id,\n                                                     passwd,\n                                                     origin_repo->salt,\n                                                     origin_repo->pwd_hash,\n                                                     origin_repo->pwd_hash_algo,\n                                                     origin_repo->pwd_hash_params) < 0) {\n                g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                             \"Incorrect password\");\n                seaf_repo_unref (origin_repo);\n                return NULL;\n            }\n        } else {\n            if (seafile_verify_repo_passwd (origin_repo_id,\n                                            passwd,\n                                            origin_repo->magic,\n                                            origin_repo->enc_version,\n                                            origin_repo->salt) < 0) {\n                g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                             \"Incorrect password\");\n                seaf_repo_unref (origin_repo);\n                return NULL;\n            }\n        }\n    }\n\n    origin_head = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                                                  origin_repo->id,\n                                                  origin_repo->version,\n                                                  origin_repo->head->commit_id);\n    if (!origin_head) {\n        seaf_warning (\"Failed to get head commit %.8s of repo %s.\\n\",\n                      origin_repo->head->commit_id, origin_repo->id);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Bad origin repo head\");\n        goto error;\n    }\n\n    dir_id = 
seaf_fs_manager_get_seafdir_id_by_path (seaf->fs_mgr,\n                                                     origin_repo->store_id,\n                                                     origin_repo->version,\n                                                     origin_head->root_id,\n                                                     path, NULL);\n    if (!dir_id) {\n        seaf_warning (\"Path %s doesn't exist or is not a dir in repo %.10s.\\n\",\n                      path, origin_repo_id);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_BAD_ARGS, \"Bad path\");\n        goto error;\n    }\n\n    repo_id = gen_uuid();\n\n    /* Save virtual repo info before actually create the repo.\n     */\n    if (save_virtual_repo_info (mgr, repo_id, origin_repo_id,\n                                path, origin_head->commit_id) < 0) {\n        seaf_warning (\"Failed to save virtual repo info for %.10s:%s\",\n                      origin_repo_id, path);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, \"Internal error\");\n        goto error;\n    }\n\n    if (do_create_virtual_repo (mgr, origin_repo, repo_id, repo_name, repo_desc,\n                                dir_id, owner, passwd, error) < 0)\n        goto error;\n\n    /* The size of virtual repo is non-zero at the beginning. 
*/\n    update_repo_size (repo_id);\n\n    seaf_repo_unref (origin_repo);\n    seaf_commit_unref (origin_head);\n    g_free (dir_id);\n    return repo_id;\n\nerror:\n    seaf_repo_unref (origin_repo);\n    seaf_commit_unref (origin_head);\n    g_free (repo_id);\n    g_free (dir_id);\n    return NULL;\n}\n\nstatic char *\ncanonical_vrepo_path (const char *path)\n{\n    char *ret = NULL;\n\n    if (path[0] != '/')\n        ret = g_strconcat (\"/\", path, NULL);\n    else\n        ret = g_strdup(path);\n\n    int len = strlen(ret);\n    int i = len - 1;\n    while (i >= 0 && ret[i] == '/')\n        ret[i--] = 0;\n\n    return ret;\n}\n\nchar *\nseaf_repo_manager_create_virtual_repo (SeafRepoManager *mgr,\n                                       const char *origin_repo_id,\n                                       const char *path,\n                                       const char *repo_name,\n                                       const char *repo_desc,\n                                       const char *owner,\n                                       const char *passwd,\n                                       GError **error)\n{\n    char *repo_id = NULL;\n    char *orig_owner = NULL;\n    char *canon_path = NULL;\n    SeafVirtRepo *vrepo = NULL;\n    char *r_origin_repo_id = NULL;\n    char *r_path = NULL;\n\n    if (g_strcmp0 (path, \"/\") == 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Invalid path\");\n        return NULL;\n    }\n\n    canon_path = canonical_vrepo_path (path);\n    vrepo = seaf_repo_manager_get_virtual_repo_info (mgr, origin_repo_id);\n    if (vrepo) {\n        // virtual repo\n        r_path = g_strconcat(vrepo->path, canon_path, NULL);\n        r_origin_repo_id = g_strdup (vrepo->origin_repo_id);\n        seaf_virtual_repo_info_free (vrepo);\n        repo_id = get_existing_virtual_repo (mgr, r_origin_repo_id, r_path);\n        if (repo_id) {\n            g_free (r_origin_repo_id);\n            g_free 
(r_path);\n            g_free (canon_path);\n            return repo_id;\n        }\n    } else {\n        r_path = g_strdup (canon_path);\n        r_origin_repo_id = g_strdup (origin_repo_id);\n        repo_id = get_existing_virtual_repo (mgr, r_origin_repo_id, r_path);\n        if (repo_id) {\n            g_free (r_origin_repo_id);\n            g_free (r_path);\n            g_free (canon_path);\n            return repo_id;\n        }\n     }\n\n    orig_owner = seaf_repo_manager_get_repo_owner (mgr, r_origin_repo_id);\n\n    repo_id = create_virtual_repo_common (mgr, r_origin_repo_id, r_path,\n                                          repo_name, repo_desc, orig_owner,\n                                          passwd, error);\n    if (!repo_id) {\n        goto out;\n    }\n\n    if (seaf_repo_manager_set_repo_owner (mgr, repo_id, orig_owner) < 0) {\n        seaf_warning (\"Failed to set repo owner for %.10s.\\n\", repo_id);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to set repo owner.\");\n        g_free (repo_id);\n        repo_id = NULL;\n    }\n\nout:\n    g_free (orig_owner);\n    g_free (r_origin_repo_id);\n    g_free (r_path);\n    g_free (canon_path);\n    return repo_id;\n}\n\nstatic gboolean\nload_virtual_info (SeafDBRow *row, void *p_vinfo)\n{\n    SeafVirtRepo *vinfo;\n    const char *repo_id, *origin_repo_id, *path, *base_commit;\n\n    repo_id = seaf_db_row_get_column_text (row, 0);\n    origin_repo_id = seaf_db_row_get_column_text (row, 1);\n    path = seaf_db_row_get_column_text (row, 2);\n    base_commit = seaf_db_row_get_column_text (row, 3);\n\n    vinfo = g_new0 (SeafVirtRepo, 1);\n    memcpy (vinfo->repo_id, repo_id, 36);\n    memcpy (vinfo->origin_repo_id, origin_repo_id, 36);\n    vinfo->path = g_strdup(path);\n    memcpy (vinfo->base_commit, base_commit, 40);\n\n    *((SeafVirtRepo **)p_vinfo) = vinfo;\n\n    return FALSE;\n}\n\nSeafVirtRepo *\nseaf_repo_manager_get_virtual_repo_info 
(SeafRepoManager *mgr,\n                                         const char *repo_id)\n{\n    char *sql;\n    SeafVirtRepo *vinfo = NULL;\n\n    sql = \"SELECT repo_id, origin_repo, path, base_commit FROM VirtualRepo \"\n        \"WHERE repo_id = ?\";\n    seaf_db_statement_foreach_row (seaf->db, sql, load_virtual_info, &vinfo,\n                                   1, \"string\", repo_id);\n\n    return vinfo;\n}\n\nvoid\nseaf_virtual_repo_info_free (SeafVirtRepo *vinfo)\n{\n    if (!vinfo) return;\n\n    g_free (vinfo->path);\n    g_free (vinfo);\n}\n\ngboolean\nseaf_repo_manager_is_virtual_repo (SeafRepoManager *mgr, const char *repo_id)\n{\n    gboolean db_err;\n\n    char *sql = \"SELECT 1 FROM VirtualRepo WHERE repo_id = ?\";\n    return seaf_db_statement_exists (seaf->db, sql, &db_err,\n                                     1, \"string\", repo_id);\n}\n\nchar *\nseaf_repo_manager_get_virtual_repo_id (SeafRepoManager *mgr,\n                                       const char *origin_repo,\n                                       const char *path,\n                                       const char *owner)\n{\n    char *sql;\n    char *ret;\n\n    if (owner) {\n        sql = \"SELECT RepoOwner.repo_id FROM RepoOwner, VirtualRepo \"\n              \"WHERE owner_id=? AND origin_repo=? AND path=? \"\n              \"AND RepoOwner.repo_id = VirtualRepo.repo_id\";\n        ret = seaf_db_statement_get_string (mgr->seaf->db, sql,\n                                            3, \"string\", owner,\n                                            \"string\", origin_repo, \"string\", path);\n    } else {\n        sql = \"SELECT repo_id FROM VirtualRepo \"\n              \"WHERE origin_repo=? AND path=? 
\";\n        ret = seaf_db_statement_get_string (mgr->seaf->db, sql,\n                                            2, \"string\", origin_repo, \"string\", path);\n    }\n\n    return ret;\n}\n\nstatic gboolean\ncollect_virtual_repo_ids (SeafDBRow *row, void *data)\n{\n    GList **p_ids = data;\n    const char *repo_id;\n\n    repo_id = seaf_db_row_get_column_text (row, 0);\n    *p_ids = g_list_prepend (*p_ids, g_strdup(repo_id));\n\n    return TRUE;\n}\n\nGList *\nseaf_repo_manager_get_virtual_repos_by_owner (SeafRepoManager *mgr,\n                                              const char *owner,\n                                              GError **error)\n{\n    GList *id_list = NULL, *ptr;\n    GList *ret = NULL;\n    char *sql;\n\n    sql = \"SELECT RepoOwner.repo_id FROM RepoOwner, VirtualRepo \"\n        \"WHERE owner_id=? \"\n        \"AND RepoOwner.repo_id = VirtualRepo.repo_id\";\n\n    if (seaf_db_statement_foreach_row (mgr->seaf->db, sql, \n                                       collect_virtual_repo_ids, &id_list,\n                                       1, \"string\", owner) < 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL, \"DB error\");\n        return NULL;\n    }\n\n    char *repo_id;\n    SeafRepo *repo;\n    for (ptr = id_list; ptr; ptr = ptr->next) {\n        repo_id = ptr->data;\n        repo = seaf_repo_manager_get_repo (mgr, repo_id);\n        if (repo != NULL)\n            ret = g_list_prepend (ret, repo);\n    }\n\n    string_list_free (id_list);\n    return ret;\n}\n\nGList *\nseaf_repo_manager_get_virtual_repo_ids_by_origin (SeafRepoManager *mgr,\n                                                  const char *origin_repo)\n{\n    GList *ret = NULL;\n    char *sql;\n\n    sql = \"SELECT repo_id FROM VirtualRepo WHERE origin_repo=?\";\n    if (seaf_db_statement_foreach_row (mgr->seaf->db, sql, \n                                       collect_virtual_repo_ids, &ret,\n                                       1, \"string\", 
origin_repo) < 0) {\n        return NULL;\n    }\n\n    return g_list_reverse (ret);\n}\n\nstatic gboolean\ncollect_virtual_info (SeafDBRow *row, void *plist)\n{\n    GList **pret = plist;\n    SeafVirtRepo *vinfo;\n    const char *repo_id, *origin_repo_id, *path, *base_commit;\n\n    repo_id = seaf_db_row_get_column_text (row, 0);\n    origin_repo_id = seaf_db_row_get_column_text (row, 1);\n    path = seaf_db_row_get_column_text (row, 2);\n    base_commit = seaf_db_row_get_column_text (row, 3);\n\n    vinfo = g_new0 (SeafVirtRepo, 1);\n    memcpy (vinfo->repo_id, repo_id, 36);\n    memcpy (vinfo->origin_repo_id, origin_repo_id, 36);\n    vinfo->path = g_strdup(path);\n    memcpy (vinfo->base_commit, base_commit, 40);\n\n    *pret = g_list_prepend (*pret, vinfo);\n\n    return TRUE;\n}\n\nGList *\nseaf_repo_manager_get_virtual_info_by_origin (SeafRepoManager *mgr,\n                                              const char *origin_repo)\n{\n    GList *ret = NULL;\n    char *sql;\n\n    sql = \"SELECT repo_id, origin_repo, path, base_commit \"\n        \"FROM VirtualRepo WHERE origin_repo=?\";\n    if (seaf_db_statement_foreach_row (mgr->seaf->db, sql, \n                                       collect_virtual_info, &ret,\n                                       1, \"string\", origin_repo) < 0) {\n        return NULL;\n    }\n\n    return g_list_reverse (ret);\n}\n\nstatic void\nset_virtual_repo_base_commit_path (const char *vrepo_id, const char *base_commit_id,\n                                   const char *new_path)\n{\n    seaf_db_statement_query (seaf->db,\n                             \"UPDATE VirtualRepo SET base_commit=?, path=? 
WHERE repo_id=?\",\n                             3, \"string\", base_commit_id, \"string\", new_path,\n                             \"string\", vrepo_id);\n}\n\nint\nseaf_repo_manager_merge_virtual_repo (SeafRepoManager *mgr,\n                                      const char *repo_id,\n                                      const char *exclude_repo)\n{\n    GList *vrepos = NULL, *ptr;\n    char *vrepo_id;\n    int ret = 0;\n\n    if (seaf_repo_manager_is_virtual_repo (mgr, repo_id)) {\n        add_merge_task (repo_id);\n        return 0;\n    }\n\n    vrepos = seaf_repo_manager_get_virtual_repo_ids_by_origin (mgr, repo_id);\n    for (ptr = vrepos; ptr; ptr = ptr->next) {\n        vrepo_id = ptr->data;\n\n        if (g_strcmp0 (exclude_repo, vrepo_id) == 0)\n            continue;\n\n        add_merge_task (vrepo_id);\n    }\n\n    string_list_free (vrepos);\n    return ret;\n}\n\n/*\n * If the missing virtual repo is renamed, update database entry;\n * otherwise delete the virtual repo.\n */\nstatic void\nhandle_missing_virtual_repo (SeafRepoManager *mgr,\n                             SeafRepo *repo, SeafCommit *head, SeafVirtRepo *vinfo,\n                             char **return_new_path)\n{\n    SeafCommit *parent = NULL;\n    char *old_dir_id = NULL;\n    GList *diff_res = NULL, *ptr;\n    DiffEntry *de;\n\n    parent = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                                             head->repo_id, head->version,\n                                             head->parent_id);\n    if (!parent) {\n        seaf_warning (\"Failed to find commit %s:%s.\\n\", head->repo_id, head->parent_id);\n        return;\n    }\n\n    int rc = diff_commits (parent, head, &diff_res, TRUE);\n    if (rc < 0) {\n        seaf_warning (\"Failed to diff commit %s to %s.\\n\",\n                      parent->commit_id, head->commit_id);\n        seaf_commit_unref (parent);\n        return;\n    }\n\n    char *path = vinfo->path, *sub_path, *p, *par_path;\n   
 gboolean is_renamed = FALSE;\n    p = &path[strlen(path)];\n    par_path = g_strdup(path);\n    sub_path = NULL;\n\n    while (1) {\n        GError *error = NULL;\n        old_dir_id = seaf_fs_manager_get_seafdir_id_by_path (seaf->fs_mgr,\n                                                             repo->store_id,\n                                                             repo->version,\n                                                             parent->root_id,\n                                                             par_path, &error);\n        if (!old_dir_id) {\n            if (error && error->code == SEAF_ERR_PATH_NO_EXIST) {\n                seaf_warning (\"Failed to find %s under commit %s in repo %s.\\n\",\n                              par_path, parent->commit_id, repo->store_id);\n                seaf_debug (\"Delete virtual repo %.10s.\\n\", vinfo->repo_id);\n                seaf_repo_manager_del_virtual_repo (mgr, vinfo->repo_id);\n                g_clear_error (&error);\n            }\n            goto out;\n        }\n\n        char de_id[41];\n        char *new_path, *new_name;\n\n        for (ptr = diff_res; ptr; ptr = ptr->next) {\n            de = ptr->data;\n            if (de->status == DIFF_STATUS_DIR_RENAMED) {\n                rawdata_to_hex (de->sha1, de_id, 20);\n                if (strcmp (de_id, old_dir_id) == 0) {\n                    if (sub_path != NULL)\n                        new_path = g_strconcat (\"/\", de->new_name, \"/\", sub_path, NULL);\n                    else\n                        new_path = g_strconcat (\"/\", de->new_name, NULL);\n                    seaf_debug (\"Updating path of virtual repo %s to %s.\\n\",\n                                vinfo->repo_id, new_path);\n                    set_virtual_repo_base_commit_path (vinfo->repo_id,\n                                                       head->commit_id, new_path);\n                    if (return_new_path)\n                        *return_new_path = 
g_strdup(new_path);\n                    /* 'sub_path = NUll' means the virtual dir itself has been renamed,\n                     *  we need to make a new commit for the virtual repo\n                     */\n                    if (sub_path == NULL) {\n                        new_name = g_path_get_basename(new_path);\n                        seaf_repo_manager_edit_repo (vinfo->repo_id,\n                                                     new_name,\n                                                     \"Changed library name\",\n                                                     NULL,\n                                                     &error);\n                        if (error) {\n                            seaf_warning (\"Failed to rename repo %s\", new_name);\n                            g_clear_error (&error);\n                        }\n                        g_free(new_name);\n                    }\n                    is_renamed = TRUE;\n                    g_free (new_path);\n                    break;\n                }\n            }\n        }\n        g_free (old_dir_id);\n\n        if (is_renamed)\n            break;\n\n        while (--p != path && *p != '/');\n\n        if (p == path)\n            break;\n\n        g_free (par_path);\n        g_free (sub_path);\n        par_path = g_strndup (path, p - path);\n        sub_path = g_strdup (p + 1);\n    }\n\n    if (!is_renamed) {\n        seaf_debug (\"Delete virtual repo %.10s.\\n\", vinfo->repo_id);\n        seaf_repo_manager_del_virtual_repo (mgr, vinfo->repo_id);\n    }\n\nout:\n    g_free (par_path);\n    g_free (sub_path);\n\n    for (ptr = diff_res; ptr; ptr = ptr->next)\n        diff_entry_free ((DiffEntry *)ptr->data);\n    g_list_free (diff_res);\n\n    seaf_commit_unref (parent);\n}\n\nvoid\nseaf_repo_manager_cleanup_virtual_repos (SeafRepoManager *mgr,\n                                         const char *origin_repo_id)\n{\n    SeafRepo *repo = NULL;\n    SeafCommit *head = NULL;\n 
   GList *vinfo_list = NULL, *ptr;\n    SeafVirtRepo *vinfo;\n    SeafDir *dir;\n    GError *error = NULL;\n\n    repo = seaf_repo_manager_get_repo (mgr, origin_repo_id);\n    if (!repo) {\n        seaf_warning (\"Failed to get repo %.10s.\\n\", origin_repo_id);\n        goto out;\n    }\n\n    head = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                                           repo->id,\n                                           repo->version,\n                                           repo->head->commit_id);\n    if (!head) {\n        seaf_warning (\"Failed to get commit %s:%.8s.\\n\",\n                      repo->id, repo->head->commit_id);\n        goto out;\n    }\n\n    vinfo_list = seaf_repo_manager_get_virtual_info_by_origin (mgr,\n                                                               origin_repo_id);\n    for (ptr = vinfo_list; ptr; ptr = ptr->next) {\n        vinfo = ptr->data;\n        dir = seaf_fs_manager_get_seafdir_by_path (seaf->fs_mgr,\n                                                   repo->store_id,\n                                                   repo->version,\n                                                   head->root_id,\n                                                   vinfo->path,\n                                                   &error);\n        if (error) {\n            if (error->code == SEAF_ERR_PATH_NO_EXIST) {\n                handle_missing_virtual_repo (mgr, repo, head, vinfo, NULL);\n            }\n            g_clear_error (&error);\n        } else\n            seaf_dir_free (dir);\n        seaf_virtual_repo_info_free (vinfo);\n    }\n\nout:\n    seaf_repo_unref (repo);\n    seaf_commit_unref (head);\n    g_list_free (vinfo_list);\n}\n\nstatic void *merge_virtual_repo (void *vtask)\n{\n    MergeTask *task = vtask;\n    SeafRepoManager *mgr = seaf->repo_mgr;\n    char *repo_id = task->repo_id;\n    SeafVirtRepo *vinfo;\n    SeafRepo *repo = NULL, *orig_repo = NULL;\n    SeafCommit *head = 
NULL, *orig_head = NULL, *base = NULL;\n    char *root = NULL, *orig_root = NULL, *base_root = NULL;\n    char new_base_commit[41] = {0};\n    int ret = 0;\n    GError *error = NULL;\n\n    /* repos */\n    repo = seaf_repo_manager_get_repo (mgr, repo_id);\n    if (!repo) {\n        seaf_warning (\"Failed to get virt repo %.10s.\\n\", repo_id);\n        ret = -1;\n        goto out;\n    }\n\n    vinfo = repo->virtual_info;\n\n    orig_repo = seaf_repo_manager_get_repo (mgr, vinfo->origin_repo_id);\n    if (!orig_repo) {\n        seaf_warning (\"Failed to get orig repo %.10s.\\n\", vinfo->origin_repo_id);\n        ret = -1;\n        goto out;\n    }\n\n    /* commits */\n    head = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                                           repo->id, repo->version,\n                                           repo->head->commit_id);\n    if (!head) {\n        seaf_warning (\"Failed to get commit %s:%.8s.\\n\",\n                      repo->id, repo->head->commit_id);\n        ret = -1;\n        goto out;\n    }\n\n    orig_head = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                                                orig_repo->id, orig_repo->version,\n                                                orig_repo->head->commit_id);\n    if (!orig_head) {\n        seaf_warning (\"Failed to get commit %s:%.8s.\\n\",\n                      orig_repo->id, orig_repo->head->commit_id);\n        ret = -1;\n        goto out;\n    }\n\n    orig_root = seaf_fs_manager_get_seafdir_id_by_path (seaf->fs_mgr,\n                                                        orig_repo->store_id,\n                                                        orig_repo->version,\n                                                        orig_head->root_id,\n                                                        vinfo->path,\n                                                        &error);\n    if (error &&\n        !g_error_matches(error,\n                      
   SEAFILE_DOMAIN,\n                         SEAF_ERR_PATH_NO_EXIST)) {\n        seaf_warning (\"Failed to get seafdir id by path in origin repo %.10s: %s.\\n\", orig_repo->store_id, error->message);\n        ret = -1;\n        goto out;\n    }\n    if (!orig_root) {\n        seaf_debug(\"Path %s not found in origin repo %.8s, delete or rename virtual repo %.8s\\n\",\n                    vinfo->path, vinfo->origin_repo_id, repo_id);\n\n        char *new_path = NULL;\n        handle_missing_virtual_repo (mgr, orig_repo, orig_head, vinfo, &new_path);\n        if (new_path != NULL) {\n            orig_root = seaf_fs_manager_get_seafdir_id_by_path (seaf->fs_mgr,\n                                                        orig_repo->store_id,\n                                                        orig_repo->version,\n                                                        orig_head->root_id,\n                                                        new_path,\n                                                        NULL);\n            g_free (new_path);\n        }\n        if (!orig_root)\n            goto out;\n    }\n\n    base = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                                           orig_repo->id, orig_repo->version,\n                                           vinfo->base_commit);\n    if (!base) {\n        seaf_warning (\"Failed to get commit %s:%.8s.\\n\",\n                      orig_repo->id, vinfo->base_commit);\n        ret = -1;\n        goto out;\n    }\n\n    /* fs roots */\n    root = head->root_id;\n\n    base_root = seaf_fs_manager_get_seafdir_id_by_path (seaf->fs_mgr,\n                                                        orig_repo->store_id,\n                                                        orig_repo->version,\n                                                        base->root_id,\n                                                        vinfo->path,\n                                                        
NULL);\n    if (!base_root) {\n        seaf_warning (\"Cannot find seafdir for repo %.10s path %s.\\n\",\n                      vinfo->origin_repo_id, vinfo->path);\n        ret = -1;\n        goto out;\n    }\n\n    if (strcmp (root, orig_root) == 0) {\n        /* Nothing to merge. */\n        seaf_debug (\"Nothing to merge.\\n\");\n    } else if (strcmp (base_root, root) == 0) {\n        /* Origin changed, virtual repo not changed. */\n        seaf_debug (\"Origin changed, virtual repo not changed.\\n\");\n        ret = seaf_repo_manager_update_dir (mgr,\n                                            repo_id,\n                                            \"/\",\n                                            orig_root,\n                                            orig_head->creator_name,\n                                            head->commit_id,\n                                            NULL,\n                                            NULL);\n        if (ret < 0) {\n            seaf_warning (\"Failed to update root of virtual repo %.10s.\\n\",\n                          repo_id);\n            goto out;\n        }\n\n        set_virtual_repo_base_commit_path (repo->id, orig_repo->head->commit_id,\n                                           vinfo->path);\n    } else if (strcmp (base_root, orig_root) == 0) {\n        /* Origin not changed, virtual repo changed. 
*/\n        seaf_debug (\"Origin not changed, virutal repo changed.\\n\");\n        ret = seaf_repo_manager_update_dir (mgr,\n                                            vinfo->origin_repo_id,\n                                            vinfo->path,\n                                            root,\n                                            head->creator_name,\n                                            orig_head->commit_id,\n                                            new_base_commit,\n                                            NULL);\n        if (ret < 0) {\n            seaf_warning (\"Failed to update origin repo %.10s path %s.\\n\",\n                          vinfo->origin_repo_id, vinfo->path);\n            goto out;\n        }\n\n        set_virtual_repo_base_commit_path (repo->id, new_base_commit, vinfo->path);\n\n        /* Since origin repo is updated, we have to merge it with other\n         * virtual repos if necessary. But we don't need to merge with\n         * the current virtual repo again.\n         */\n        seaf_repo_manager_cleanup_virtual_repos (mgr, vinfo->origin_repo_id);\n        seaf_repo_manager_merge_virtual_repo (mgr,\n                                              vinfo->origin_repo_id,\n                                              repo_id);\n    } else {\n        /* Both origin and virtual repo are changed. 
*/\n        seaf_debug (\"Both origin and virtual repo are changed.\\n\");\n        MergeOptions opt;\n        const char *roots[3];\n\n        memset (&opt, 0, sizeof(opt));\n        opt.n_ways = 3;\n        memcpy (opt.remote_repo_id, repo_id, 36);\n        memcpy (opt.remote_head, head->commit_id, 40);\n        opt.do_merge = TRUE;\n\n        roots[0] = base_root; /* base */\n        roots[1] = orig_root; /* head */\n        roots[2] = root;  /* remote */\n\n        /* Merge virtual into origin */\n        if (seaf_merge_trees (orig_repo->store_id, orig_repo->version,\n                              3, roots, &opt) < 0) {\n            seaf_warning (\"Failed to merge virtual repo %.10s.\\n\", repo_id);\n            ret = -1;\n            goto out;\n        }\n\n        seaf_debug (\"Number of dirs visted in merge: %d.\\n\", opt.visit_dirs);\n\n        /* Update virtual repo root. */\n        ret = seaf_repo_manager_update_dir (mgr,\n                                            repo_id,\n                                            \"/\",\n                                            opt.merged_tree_root,\n                                            orig_head->creator_name,\n                                            head->commit_id,\n                                            NULL,\n                                            NULL);\n        if (ret < 0) {\n            seaf_warning (\"Failed to update root of virtual repo %.10s.\\n\",\n                          repo_id);\n            goto out;\n        }\n\n        /* Update origin repo path. 
*/\n        ret = seaf_repo_manager_update_dir (mgr,\n                                            vinfo->origin_repo_id,\n                                            vinfo->path,\n                                            opt.merged_tree_root,\n                                            head->creator_name,\n                                            orig_head->commit_id,\n                                            new_base_commit,\n                                            NULL);\n        if (ret < 0) {\n            seaf_warning (\"Failed to update origin repo %.10s path %s.\\n\",\n                          vinfo->origin_repo_id, vinfo->path);\n            goto out;\n        }\n\n        set_virtual_repo_base_commit_path (repo->id, new_base_commit, vinfo->path);\n\n        seaf_repo_manager_cleanup_virtual_repos (mgr, vinfo->origin_repo_id);\n        seaf_repo_manager_merge_virtual_repo (mgr,\n                                              vinfo->origin_repo_id,\n                                              repo_id);\n    }\n\nout:\n    if (error)\n        g_clear_error (&error);\n    seaf_repo_unref (repo);\n    seaf_repo_unref (orig_repo);\n    seaf_commit_unref (head);\n    seaf_commit_unref (orig_head);\n    seaf_commit_unref (base);\n    g_free (base_root);\n    g_free (orig_root);\n    return vtask;\n}\n\nstatic void merge_virtual_repo_done (void *vtask)\n{\n    MergeTask *task = vtask;\n\n    seaf_debug (\"Task %.8s done.\\n\", task->repo_id);\n\n    g_hash_table_remove (scheduler->running, task->repo_id);\n}\n\nstatic int\nschedule_merge_tasks (void *vscheduler)\n{\n    MergeScheduler *scheduler = vscheduler;\n    int n_running = g_hash_table_size (scheduler->running);\n    MergeTask *task;\n\n    /* seaf_debug (\"Waiting tasks %d, running tasks %d.\\n\", */\n    /*             g_queue_get_length (scheduler->queue), n_running); */\n\n    if (n_running >= MAX_RUNNING_TASKS)\n        return TRUE;\n\n    pthread_mutex_lock (&scheduler->q_lock);\n\n    
while (n_running < MAX_RUNNING_TASKS) {\n        task = g_queue_pop_head (scheduler->queue);\n        if (!task)\n            break;\n\n        if (!g_hash_table_lookup (scheduler->running, task->repo_id)) {\n            int ret = ccnet_job_manager_schedule_job (scheduler->tpool,\n                                                      merge_virtual_repo,\n                                                      merge_virtual_repo_done,\n                                                      task);\n            if (ret < 0) {\n                g_queue_push_tail (scheduler->queue, task);\n                break;\n            }\n\n            g_hash_table_insert (scheduler->running,\n                                 g_strdup(task->repo_id),\n                                 task);\n            n_running++;\n\n            seaf_debug (\"Run task for repo %.8s.\\n\", task->repo_id);\n        } else {\n            seaf_debug (\"A task for repo %.8s is already running.\\n\", task->repo_id);\n\n            g_queue_push_tail (scheduler->queue, task);\n            break;\n        }\n    }\n\n    pthread_mutex_unlock (&scheduler->q_lock);\n\n    return TRUE;\n}\n\nstatic gint task_cmp (gconstpointer a, gconstpointer b)\n{\n    const MergeTask *task_a = a;\n    const MergeTask *task_b = b;\n\n    return strcmp (task_a->repo_id, task_b->repo_id);\n}\n\nstatic void\nadd_merge_task (const char *repo_id)\n{\n    MergeTask *task = g_new0 (MergeTask, 1);\n\n    seaf_debug (\"Add merge task for repo %.8s.\\n\", repo_id);\n\n    memcpy (task->repo_id, repo_id, 36);\n\n    pthread_mutex_lock (&scheduler->q_lock);\n\n    if (g_queue_find_custom (scheduler->queue, task, task_cmp) != NULL) {\n        seaf_debug (\"Task for repo %.8s is already queued.\\n\", repo_id);\n        g_free (task);\n    } else\n        g_queue_push_tail (scheduler->queue, task);\n\n    pthread_mutex_unlock (&scheduler->q_lock);\n}\n\nint\nseaf_repo_manager_init_merge_scheduler ()\n{\n    scheduler = g_new0 
(MergeScheduler, 1);\n    if (!scheduler)\n        return -1;\n\n    pthread_mutex_init (&scheduler->q_lock, NULL);\n\n    scheduler->queue = g_queue_new ();\n    scheduler->running = g_hash_table_new_full (g_str_hash, g_str_equal,\n                                                g_free, g_free);\n\n    scheduler->tpool = ccnet_job_manager_new (MAX_RUNNING_TASKS);\n    scheduler->timer = ccnet_timer_new (schedule_merge_tasks,\n                                        scheduler,\n                                        SCHEDULE_INTERVAL);\n    return 0;\n}\n\nint\nseaf_repo_manager_repair_virtual_repo (char *repo_id)\n{\n    SeafRepoManager *mgr = seaf->repo_mgr;\n    SeafVirtRepo *vinfo = NULL;\n    SeafRepo *repo = NULL, *orig_repo = NULL;\n    SeafCommit *head = NULL, *orig_head = NULL;\n    char *root = NULL, *orig_root = NULL;\n    char new_base_commit[41] = {0};\n    int ret = 0;\n    GError *error = NULL;\n\n    /* repos */\n    repo = seaf_repo_manager_get_repo (mgr, repo_id);\n    if (!repo) {\n        seaf_warning (\"Failed to get virt repo %.10s.\\n\", repo_id);\n        ret = -1;\n        goto out;\n    }\n\n    if (!repo->virtual_info) {\n        seaf_warning (\"Repo %.10s is not a virtual repo.\\n\", repo_id);\n        ret = -1;\n        goto out;\n    }\n\n    vinfo = seaf_repo_manager_get_virtual_repo_info (mgr, repo_id);\n    if (!vinfo) {\n        seaf_warning (\"Failed to get virt repo info %.10s.\\n\", repo_id);\n        ret = -1;\n        goto out;\n    }\n\n    orig_repo = seaf_repo_manager_get_repo (mgr, vinfo->origin_repo_id);\n    if (!orig_repo) {\n        seaf_warning (\"Failed to get orig repo %.10s.\\n\", vinfo->origin_repo_id);\n        ret = -1;\n        goto out;\n    }\n\n    /* commits */\n    head = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                                           repo->id, repo->version,\n                                           repo->head->commit_id);\n    if (!head) {\n        seaf_warning (\"Failed 
to get virtual repo commit %s:%.8s.\\n\",\n                      repo->id, repo->head->commit_id);\n        ret = -1;\n        goto out;\n    }\n\n    orig_head = seaf_commit_manager_get_commit (seaf->commit_mgr,\n                                                orig_repo->id, orig_repo->version,\n                                                orig_repo->head->commit_id);\n    if (!orig_head) {\n        seaf_warning (\"Failed to get origin repo commit %s:%.8s.\\n\",\n                      orig_repo->id, orig_repo->head->commit_id);\n        ret = -1;\n        goto out;\n    }\n\n    orig_root = seaf_fs_manager_get_seafdir_id_by_path (seaf->fs_mgr,\n                                                        orig_repo->store_id,\n                                                        orig_repo->version,\n                                                        orig_head->root_id,\n                                                        vinfo->path,\n                                                        &error);\n    if (error &&\n        !g_error_matches(error,\n                         SEAFILE_DOMAIN,\n                         SEAF_ERR_PATH_NO_EXIST)) {\n        seaf_warning (\"Failed to get seafdir id by path in origin repo %.10s: %s.\\n\", orig_repo->store_id, error->message);\n        ret = -1;\n        goto out;\n    }\n    if (!orig_root) {\n        seaf_message(\"Path %s not found in origin repo %.8s, delete or rename virtual repo %.8s\\n\",\n                    vinfo->path, vinfo->origin_repo_id, repo_id);\n\n        goto out;\n    }\n\n    /* fs roots */\n    root = head->root_id;\n\n    MergeOptions opt;\n    const char *roots[2];\n\n    memset (&opt, 0, sizeof(opt));\n    opt.n_ways = 2;\n    memcpy (opt.remote_repo_id, repo_id, 36);\n    memcpy (opt.remote_head, head->commit_id, 40);\n\n    roots[0] = orig_root;\n    roots[1] = root;\n\n    /* Merge virtual into origin */\n    if (seaf_merge_trees (orig_repo->store_id, orig_repo->version,\n                 
         2, roots, &opt) < 0) {\n        seaf_warning (\"Failed to merge virtual repo %.10s.\\n\", repo_id);\n        ret = -1;\n        goto out;\n    }\n\n    seaf_debug (\"Number of dirs visted in merge: %d.\\n\", opt.visit_dirs);\n\n    /* Update virtual repo root. */\n    ret = seaf_repo_manager_update_dir (mgr,\n                                        repo_id,\n                                        \"/\",\n                                        opt.merged_tree_root,\n                                        orig_head->creator_name,\n                                        head->commit_id,\n                                        NULL,\n                                        NULL);\n    if (ret < 0) {\n        seaf_warning (\"Failed to update root of virtual repo %.10s.\\n\",\n                      repo_id);\n        goto out;\n    }\n\n    /* Update origin repo path. */\n    ret = seaf_repo_manager_update_dir (mgr,\n                                        vinfo->origin_repo_id,\n                                        vinfo->path,\n                                        opt.merged_tree_root,\n                                        head->creator_name,\n                                        orig_head->commit_id,\n                                        new_base_commit,\n                                        NULL);\n    if (ret < 0) {\n        seaf_warning (\"Failed to update origin repo %.10s path %s.\\n\",\n                      vinfo->origin_repo_id, vinfo->path);\n        goto out;\n    }\n\n    set_virtual_repo_base_commit_path (repo->id, new_base_commit, vinfo->path);\n\nout:\n    if (error)\n        g_clear_error (&error);\n    seaf_virtual_repo_info_free (vinfo);\n    seaf_repo_unref (repo);\n    seaf_repo_unref (orig_repo);\n    seaf_commit_unref (head);\n    seaf_commit_unref (orig_head);\n    g_free (orig_root);\n    return ret;\n}\n"
  },
  {
    "path": "server/web-accesstoken-mgr.c",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#include \"common.h\"\n\n#include <timer.h>\n\n#include <pthread.h>\n\n#include \"seafile-session.h\"\n#include \"web-accesstoken-mgr.h\"\n#include \"seafile-error.h\"\n\n#include \"utils.h\"\n\n#include \"log.h\"\n\n#define CLEANING_INTERVAL_MSEC 1000*300\t/* 5 minutes */\n#define TOKEN_EXPIRE_TIME 3600\t        /* 1 hour */\n#define TOKEN_LEN 36\n\nstruct WebATPriv {\n    GHashTable\t\t*access_token_hash; /* token -> access info */\n    pthread_mutex_t lock;\n\n    gboolean cluster_mode;\n    struct ObjCache *cache;\n};\ntypedef struct WebATPriv WebATPriv;\n\n/* #define DEBUG 1 */\n\ntypedef struct {\n    char *repo_id;\n    char *obj_id;\n    char *op;\n    char *username;\n    long expire_time;\n    gboolean use_onetime;\n} AccessInfo;\n\nstatic void\nfree_access_info (AccessInfo *info)\n{\n    if (!info)\n        return;\n\n    g_free (info->repo_id);\n    g_free (info->obj_id);\n    g_free (info->op);\n    g_free (info->username);\n    g_free (info);\n}\n\nSeafWebAccessTokenManager*\nseaf_web_at_manager_new (SeafileSession *session)\n{\n    SeafWebAccessTokenManager *mgr = g_new0 (SeafWebAccessTokenManager, 1);\n\n    mgr->seaf = session;\n\n    mgr->priv = g_new0(WebATPriv, 1);\n    mgr->priv->access_token_hash = g_hash_table_new_full (g_str_hash, g_str_equal,\n                                                    g_free,\n                                                    (GDestroyNotify)free_access_info);\n    pthread_mutex_init (&mgr->priv->lock, NULL);\n\n    return mgr;\n}\n\nstatic gboolean\nremove_expire_info (gpointer key, gpointer value, gpointer user_data)\n{\n    AccessInfo *info = (AccessInfo *)value;\n    long now = *((long*)user_data);\n\n    if (info && now >= info->expire_time) {\n        return TRUE;\n    }\n\n    return FALSE;\n}\n\nstatic int\nclean_pulse (void *vmanager)\n{\n    SeafWebAccessTokenManager *manager = vmanager;\n    long now = 
(long)time(NULL);\n\n    pthread_mutex_lock (&manager->priv->lock);\n\n    g_hash_table_foreach_remove (manager->priv->access_token_hash,\n                                 remove_expire_info, &now);\n\n    pthread_mutex_unlock (&manager->priv->lock);\n    \n    return TRUE;\n}\n\nint\nseaf_web_at_manager_start (SeafWebAccessTokenManager *mgr)\n{\n    ccnet_timer_new (clean_pulse, mgr, CLEANING_INTERVAL_MSEC);\n\n    return 0;\n}\n\nstatic char *\ngen_new_token (GHashTable *token_hash)\n{\n    char uuid[37];\n    char *token;\n\n    while (1) {\n        gen_uuid_inplace (uuid);\n        token = g_strndup(uuid, TOKEN_LEN);\n\n        /* Make sure the new token doesn't conflict with an existing one. */\n        if (g_hash_table_lookup (token_hash, token) != NULL)\n            g_free (token);\n        else\n            return token;\n    }\n}\n\nchar *\nseaf_web_at_manager_get_access_token (SeafWebAccessTokenManager *mgr,\n                                      const char *repo_id,\n                                      const char *obj_id,\n                                      const char *op,\n                                      const char *username,\n                                      int use_onetime,\n                                      GError **error)\n{\n    AccessInfo *info;\n    long now = (long)time(NULL);\n    long expire;\n    char *t;\n    SeafileWebAccess *webaccess;\n\n    if (strcmp(op, \"view\") != 0 &&\n        strcmp(op, \"download\") != 0 &&\n        strcmp(op, \"downloadblks\") != 0 &&\n        strcmp(op, \"download-dir\") != 0 &&\n        strcmp(op, \"download-multi\") != 0 &&\n        strcmp(op, \"download-link\") != 0 &&\n        strcmp(op, \"download-dir-link\") != 0 &&\n        strcmp(op, \"download-multi-link\") != 0 &&\n        strcmp(op, \"upload\") != 0 &&\n        strcmp(op, \"update\") != 0 &&\n        strcmp(op, \"upload-link\") != 0 &&\n        strcmp(op, \"upload-blks-api\") != 0 &&\n        strcmp(op, \"upload-blks-aj\") != 0 
&&\n        strcmp(op, \"update-blks-api\") != 0 &&\n        strcmp(op, \"update-blks-aj\") != 0) {\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Invalid operation type.\");\n        return NULL;\n    }\n\n    pthread_mutex_lock (&mgr->priv->lock);\n\n    t = gen_new_token (mgr->priv->access_token_hash);\n    expire = now + seaf->web_token_expire_time;\n\n    info = g_new0 (AccessInfo, 1);\n    info->repo_id = g_strdup (repo_id);\n    info->obj_id = g_strdup (obj_id);\n    info->op = g_strdup (op);\n    info->username = g_strdup (username);\n    info->expire_time = expire;\n    if (use_onetime) {\n        info->use_onetime = TRUE;\n    }\n\n    g_hash_table_insert (mgr->priv->access_token_hash, g_strdup(t), info);\n\n    pthread_mutex_unlock (&mgr->priv->lock);\n\n#ifdef HAVE_EVHTP\n    if (!seaf->go_fileserver) {\n        if (strcmp(op, \"download-dir\") == 0 ||\n            strcmp(op, \"download-multi\") == 0 ||\n            strcmp(op, \"download-dir-link\") == 0 ||\n            strcmp(op, \"download-multi-link\") == 0) {\n\n            webaccess = g_object_new (SEAFILE_TYPE_WEB_ACCESS,\n                                      \"repo_id\", info->repo_id,\n                                      \"obj_id\", info->obj_id,\n                                      \"op\", info->op,\n                                      \"username\", info->username,\n                                      NULL);\n\n            if (zip_download_mgr_start_zip_task (seaf->zip_download_mgr,\n                                                 t, webaccess, error) < 0) {\n                pthread_mutex_lock (&mgr->priv->lock);\n                g_hash_table_remove (mgr->priv->access_token_hash, t);\n                pthread_mutex_unlock (&mgr->priv->lock);\n\n                g_object_unref (webaccess);\n                g_free (t);\n                return NULL;\n            }\n            g_object_unref (webaccess);\n        }\n    }\n#endif\n\n    return 
t;\n}\n\nSeafileWebAccess *\nseaf_web_at_manager_query_access_token (SeafWebAccessTokenManager *mgr,\n                                        const char *token)\n{\n    SeafileWebAccess *webaccess;\n    AccessInfo *info;\n\n    pthread_mutex_lock (&mgr->priv->lock);\n    info = g_hash_table_lookup (mgr->priv->access_token_hash, token);\n    pthread_mutex_unlock (&mgr->priv->lock);\n\n    if (info != NULL) {\n        long expire_time = info->expire_time;\n        long now = (long)time(NULL);        \n\n        if (now - expire_time >= 0) {\n            return NULL;\n        } else {\n            webaccess = g_object_new (SEAFILE_TYPE_WEB_ACCESS,\n                                      \"repo_id\", info->repo_id,\n                                      \"obj_id\", info->obj_id,\n                                      \"op\", info->op,\n                                      \"username\", info->username,\n                                      NULL);\n\n            if (info->use_onetime) {\n                pthread_mutex_lock (&mgr->priv->lock);\n                g_hash_table_remove (mgr->priv->access_token_hash, token);\n                pthread_mutex_unlock (&mgr->priv->lock);\n            }\n\n            return webaccess;\n        }\n    }\n\n    return NULL;\n}\n"
  },
  {
    "path": "server/web-accesstoken-mgr.h",
    "content": "/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */\n\n#ifndef WEB_ACCESSTOKEN_MGR_H\n#define WEB_ACCESSTOKEN_MGR_H\n\nstruct _SeafileSession;\n\nstruct WebATPriv;\n\nstruct _SeafWebAccessTokenManager {\n    struct _SeafileSession\t*seaf;\n    struct WebATPriv *priv;\n};\ntypedef struct _SeafWebAccessTokenManager SeafWebAccessTokenManager;\n\nSeafWebAccessTokenManager* seaf_web_at_manager_new (struct _SeafileSession *seaf);\n\nint\nseaf_web_at_manager_start (SeafWebAccessTokenManager *mgr);\n\n/*\n * Returns an access token for the given access info.\n * If a token doesn't exist or has expired, generate and return a new one.\n */\nchar *\nseaf_web_at_manager_get_access_token (SeafWebAccessTokenManager *mgr,\n                                      const char *repo_id,\n                                      const char *obj_id,\n                                      const char *op,\n                                      const char *username,\n                                      int use_onetime,\n                                      GError **error);\n\n/*\n * Returns access info for the given token.\n */\nSeafileWebAccess *\nseaf_web_at_manager_query_access_token (SeafWebAccessTokenManager *mgr,\n                                        const char *token);\n\n#endif /* WEB_ACCESSTOKEN_MGR_H */\n\n"
  },
  {
    "path": "server/zip-download-mgr.c",
    "content": "#include \"common.h\"\n\n#ifdef HAVE_EVHTP\n#include <pthread.h>\n#include <jansson.h>\n\n#include <timer.h>\n#include \"utils.h\"\n#include \"log.h\"\n#include \"seafile-error.h\"\n#include \"seafile-session.h\"\n#include \"pack-dir.h\"\n#include \"web-accesstoken-mgr.h\"\n#include \"zip-download-mgr.h\"\n\n#define MAX_ZIP_THREAD_NUM 5\n#define SCAN_PROGRESS_INTERVAL 24 * 3600 // 1 day\n#define PROGRESS_TTL 5 * 3600 // 5 hours\n#define DEFAULT_MAX_DOWNLOAD_DIR_SIZE 100 * 1000000 /* 100MB */\n\ntypedef struct ZipDownloadMgrPriv {\n    pthread_mutex_t progress_lock;\n    GHashTable *progress_store;\n    GThreadPool *zip_tpool;\n    // Abnormal behavior lead to no download request for the zip finished progress,\n    // so related progress will not be removed,\n    // this timer is used to scan progress and remove invalid progress.\n    CcnetTimer *scan_progress_timer;\n} ZipDownloadMgrPriv;\n\nvoid\nfree_progress (Progress *progress)\n{\n    if (!progress)\n        return;\n\n    if (g_file_test (progress->zip_file_path, G_FILE_TEST_EXISTS)) {\n        g_unlink (progress->zip_file_path);\n    }\n    g_free (progress->zip_file_path);\n    g_free (progress);\n}\n\ntypedef enum DownloadType {\n    DOWNLOAD_DIR,\n    DOWNLOAD_MULTI\n} DownloadType;\n\ntypedef struct DownloadObj {\n    char *token;\n    DownloadType type;\n    SeafRepo *repo;\n    char *user;\n    gboolean is_windows;\n    // download-dir: top dir name; download-multi: \"\"\n    char *dir_name;\n    // download-dir: obj_id; download-multi: dirent list\n    void *internal;\n    Progress *progress;\n} DownloadObj;\n\nstatic void\nfree_download_obj (DownloadObj *obj)\n{\n    if (!obj)\n        return;\n\n    g_free (obj->token);\n    seaf_repo_unref (obj->repo);\n    g_free (obj->user);\n    g_free (obj->dir_name);\n    if (obj->type == DOWNLOAD_DIR) {\n        g_free ((char *)obj->internal);\n    } else {\n        g_list_free_full ((GList *)obj->internal, (GDestroyNotify)seaf_dirent_free);\n 
   }\n    g_free (obj);\n}\n\nstatic void\nstart_zip_task (gpointer data, gpointer user_data);\n\nstatic int\nscan_progress (void *data);\n\nstatic int\nget_download_file_count (DownloadObj *obj, GError **error);\n\nstatic gboolean\nvalidate_download_size (DownloadObj *obj, GError **error);\n\nZipDownloadMgr *\nzip_download_mgr_new ()\n{\n    GError *error = NULL;\n    ZipDownloadMgr *mgr = g_new0 (ZipDownloadMgr, 1);\n    ZipDownloadMgrPriv *priv = g_new0 (ZipDownloadMgrPriv, 1);\n\n    priv->zip_tpool = g_thread_pool_new (start_zip_task, priv, MAX_ZIP_THREAD_NUM, FALSE, &error);\n    if (!priv->zip_tpool) {\n        if (error) {\n            seaf_warning (\"Failed to create zip task thread pool: %s.\\n\", error->message);\n            g_clear_error (&error);\n        } else {\n            seaf_warning (\"Failed to create zip task thread pool.\\n\");\n        }\n        g_free (priv);\n        g_free (mgr);\n        return NULL;\n    }\n\n    pthread_mutex_init (&priv->progress_lock, NULL);\n    priv->progress_store = g_hash_table_new_full (g_str_hash, g_str_equal, g_free,\n                                                  (GDestroyNotify)free_progress);\n    priv->scan_progress_timer = ccnet_timer_new (scan_progress, priv,\n                                                 SCAN_PROGRESS_INTERVAL * 1000);\n    mgr->priv = priv;\n\n    return mgr;\n}\n\nstatic void\nremove_progress_by_token (ZipDownloadMgrPriv *priv, const char *token)\n{\n    pthread_mutex_lock (&priv->progress_lock);\n    g_hash_table_remove (priv->progress_store, token);\n    pthread_mutex_unlock (&priv->progress_lock);\n}\n\nstatic int\nscan_progress (void *data)\n{\n    time_t now = time(NULL);\n    ZipDownloadMgrPriv *priv = data;\n    GHashTableIter iter;\n    gpointer key, value;\n    Progress *progress;\n\n    pthread_mutex_lock (&priv->progress_lock);\n\n    g_hash_table_iter_init (&iter, priv->progress_store);\n    while (g_hash_table_iter_next (&iter, &key, &value)) {\n        progress = 
value;\n        if (now >= progress->expire_ts) {\n            g_hash_table_iter_remove (&iter);\n        }\n    }\n\n    pthread_mutex_unlock (&priv->progress_lock);\n\n    return TRUE;\n}\n\nstatic SeafileCrypt *\nget_seafile_crypt (SeafRepo *repo, const char *user)\n{\n    SeafileCryptKey *key = NULL;\n    char *key_hex, *iv_hex;\n    unsigned char enc_key[32], enc_iv[16];\n    SeafileCrypt *crypt = NULL;\n\n    key = seaf_passwd_manager_get_decrypt_key (seaf->passwd_mgr,\n                                               repo->id, user);\n    if (!key) {\n        seaf_warning (\"Failed to get derypt key for repo %.8s.\\n\", repo->id);\n        return NULL;\n    }\n\n    g_object_get (key, \"key\", &key_hex, \"iv\", &iv_hex, NULL);\n    if (repo->enc_version == 1)\n        hex_to_rawdata (key_hex, enc_key, 16);\n    else\n        hex_to_rawdata (key_hex, enc_key, 32);\n    hex_to_rawdata (iv_hex, enc_iv, 16);\n    crypt = seafile_crypt_new (repo->enc_version, enc_key, enc_iv);\n    g_free (key_hex);\n    g_free (iv_hex);\n    g_object_unref (key);\n\n    return crypt;\n}\n\nstatic void\nstart_zip_task (gpointer data, gpointer user_data)\n{\n    DownloadObj *obj = data;\n    ZipDownloadMgrPriv *priv = user_data;\n    SeafRepo *repo = obj->repo;\n    SeafileCrypt *crypt = NULL;\n    int ret = 0;\n\n    if (repo->encrypted) {\n        crypt = get_seafile_crypt (repo, obj->user);\n        if (!crypt) {\n            ret = -1;\n            goto out;\n        }\n    }\n\n    if (!validate_download_size (obj, NULL)) {\n        ret = -1;\n        obj->progress->size_too_large = TRUE;\n        goto out;\n    }\n\n    int file_count = get_download_file_count (obj, NULL);\n    if (file_count < 0) {\n        ret = -1;\n        goto out;\n    }\n    obj->progress->total = file_count;\n\n    ret = pack_files (repo->store_id, repo->version, obj->dir_name,\n                      obj->internal, crypt, obj->is_windows, obj->progress);\n\nout:\n    if (crypt) {\n        g_free 
(crypt);\n    }\n    if (ret == -1 && !obj->progress->canceled &&\n        !obj->progress->size_too_large) {\n        obj->progress->internal_error = TRUE;\n    }\n    free_download_obj (obj);\n}\n\nstatic int\nparse_download_dir_data (DownloadObj *obj, const char *data)\n{\n    json_t *jobj;\n    json_error_t jerror;\n    const char *dir_name;\n    const char *obj_id;\n\n    jobj = json_loadb (data, strlen(data), 0, &jerror);\n    if (!jobj) {\n        seaf_warning (\"Failed to parse download dir data: %s.\\n\", jerror.text);\n        return -1;\n    }\n\n    obj->is_windows = json_object_get_int_member (jobj, \"is_windows\");\n\n    dir_name = json_object_get_string_member (jobj, \"dir_name\");\n    if (!dir_name || strcmp (dir_name, \"\") == 0) {\n        seaf_warning (\"Invalid download dir data: miss dir_name filed.\\n\");\n        json_decref (jobj);\n        return -1;\n    }\n\n    obj_id = json_object_get_string_member (jobj, \"obj_id\");\n    if (!obj_id || strcmp (obj_id, \"\") == 0) {\n        seaf_warning (\"Invalid download dir data: miss obj_id filed.\\n\");\n        json_decref (jobj);\n        return -1;\n    }\n\n    obj->dir_name = g_strdup (dir_name);\n    obj->internal = g_strdup (obj_id);\n\n    json_decref (jobj);\n\n    return 0;\n}\n\nstatic int\nparse_download_multi_data (DownloadObj *obj, const char *data)\n{\n    json_t *jobj;\n    SeafRepo *repo = obj->repo;\n    const char *tmp_parent_dir;\n    char *parent_dir;\n    json_t *name_array;\n    json_error_t jerror;\n    int i;\n    int len;\n    const char *file_name;\n    SeafDirent *dirent;\n    SeafDir *dir;\n    GList *dirent_list = NULL, *p = NULL;\n    GError *error = NULL;\n\n    jobj = json_loadb (data, strlen(data), 0, &jerror);\n    if (!jobj) {\n        seaf_warning (\"Failed to parse download multi data: %s.\\n\", jerror.text);\n        return -1;\n    }\n\n    obj->is_windows = json_object_get_int_member (jobj, \"is_windows\");\n\n    tmp_parent_dir = 
json_object_get_string_member (jobj, \"parent_dir\");\n    if (!tmp_parent_dir || strcmp (tmp_parent_dir, \"\") == 0) {\n        seaf_warning (\"Invalid download multi data, miss parent_dir field.\\n\");\n        json_decref (jobj);\n        return -1;\n    }\n    name_array = json_object_get (jobj, \"file_list\");\n    if (!name_array) {\n        seaf_warning (\"Invalid download multi data, miss file_list field.\\n\");\n        json_decref (jobj);\n        return -1;\n    }\n    len = json_array_size (name_array);\n    if (len == 0) {\n        seaf_warning (\"Invalid download multi data, miss download file name.\\n\");\n        json_decref (jobj);\n        return -1;\n    }\n    parent_dir = format_dir_path (tmp_parent_dir);\n\n    dir = seaf_fs_manager_get_seafdir_by_path (seaf->fs_mgr, repo->store_id,\n                                               repo->version, repo->root_id, parent_dir, &error);\n    if (!dir) {\n        if (error) {\n            seaf_warning (\"Failed to get dir %s repo %.8s: %s.\\n\",\n                          parent_dir, repo->store_id, error->message);\n            g_clear_error(&error);\n        } else {\n            seaf_warning (\"dir %s doesn't exist in repo %.8s.\\n\",\n                          parent_dir, repo->store_id);\n        }\n        g_free (parent_dir);\n        json_decref (jobj);\n        return -1;\n    }\n    GHashTable *dirent_hash = g_hash_table_new(g_str_hash, g_str_equal);\n    for (p = dir->entries; p; p = p->next) {\n        SeafDirent *d = p->data;\n        g_hash_table_insert(dirent_hash, d->name, d);\n    }\n\n    for (i = 0; i < len; i++) {\n        file_name = json_string_value (json_array_get (name_array, i));\n        if (strcmp (file_name, \"\") == 0) {\n            seaf_warning (\"Invalid download file name: %s.\\n\", file_name);\n            if (dirent_list) {\n                g_list_free_full (dirent_list, (GDestroyNotify)seaf_dirent_free);\n                dirent_list = NULL;\n            }\n         
   break;\n        }\n\n        // Packing files in multi-level directories.\n        if (strchr (file_name, '/') != NULL) {\n            char *fullpath = g_build_path (\"/\", parent_dir, file_name, NULL);\n            dirent = seaf_fs_manager_get_dirent_by_path (seaf->fs_mgr, repo->store_id, repo->version, repo->root_id, fullpath, &error);\n            if (!dirent) {\n                if (error) {\n                    seaf_warning (\"Failed to get path %s repo %.8s: %s.\\n\",\n                                  fullpath, repo->store_id, error->message);\n                    g_clear_error(&error);\n                } else {\n                    seaf_warning (\"Path %s doesn't exist in repo %.8s.\\n\",\n                                  parent_dir, repo->store_id);\n                }\n                if (dirent_list) {\n                    g_list_free_full (dirent_list, (GDestroyNotify)seaf_dirent_free);\n                    dirent_list = NULL;\n                }\n                g_free (fullpath);\n                break;\n            }\n            g_free (fullpath);\n            dirent_list = g_list_prepend (dirent_list, dirent);\n        } else {\n            dirent = g_hash_table_lookup (dirent_hash, file_name);\n            if (!dirent) {\n                seaf_warning (\"Failed to get dirent for %s in dir %s in repo %.8s.\\n\",\n                               file_name, parent_dir, repo->store_id);\n                if (dirent_list) {\n                    g_list_free_full (dirent_list, (GDestroyNotify)seaf_dirent_free);\n                    dirent_list = NULL;\n                }\n                break;\n            }\n\n            dirent_list = g_list_prepend (dirent_list, seaf_dirent_dup(dirent));\n        }\n    }\n\n    g_hash_table_unref(dirent_hash);\n    g_free (parent_dir);\n    json_decref (jobj);\n    seaf_dir_free (dir);\n\n    if (!dirent_list) {\n        return -1;\n    }\n    obj->dir_name = g_strdup (\"\");\n    obj->internal = dirent_list;\n    
return 0;\n}\n\nstatic gint64\ncalcuate_download_multi_size (SeafRepo *repo, GList *dirent_list)\n{\n    GList *iter = dirent_list;\n    SeafDirent *dirent;\n    gint64 size;\n    gint64 total_size = 0;\n\n    for (; iter; iter = iter->next) {\n        dirent = iter->data;\n        if (S_ISREG(dirent->mode)) {\n            if (repo->version > 0) {\n                size = dirent->size;\n            } else {\n                size = seaf_fs_manager_get_file_size (seaf->fs_mgr, repo->store_id,\n                                                      repo->version, dirent->id);\n            }\n            if (size < 0) {\n                seaf_warning (\"Failed to get file %s size.\\n\", dirent->name);\n                return -1;\n            }\n            total_size += size;\n        } else if (S_ISDIR(dirent->mode)) {\n            size = seaf_fs_manager_get_fs_size (seaf->fs_mgr, repo->store_id,\n                                                repo->version, dirent->id);\n            if (size < 0) {\n                seaf_warning (\"Failed to get dir %s size.\\n\", dirent->name);\n                return -1;\n            }\n            total_size += size;\n        }\n    }\n\n    return total_size;\n}\n\nstatic int\ncalcuate_download_multi_file_count (SeafRepo *repo, GList *dirent_list)\n{\n    GList *iter = dirent_list;\n    SeafDirent *dirent;\n    int cur_count;\n    int count = 0;\n\n    for (; iter; iter = iter->next) {\n        dirent = iter->data;\n        if (S_ISREG(dirent->mode)) {\n            count += 1;\n        } else if (S_ISDIR(dirent->mode)) {\n            cur_count = seaf_fs_manager_count_fs_files (seaf->fs_mgr, repo->store_id,\n                                                        repo->version, dirent->id);\n            if (cur_count < 0) {\n                seaf_warning (\"Failed to get dir %s file count.\\n\", dirent->name);\n                return -1;\n            }\n            count += cur_count;\n        }\n    }\n\n    return 
count;\n}\n\nstatic gboolean\nvalidate_download_size (DownloadObj *obj, GError **error)\n{\n    SeafRepo *repo = obj->repo;\n    gint64 download_size;\n    gint64 max_download_dir_size;\n\n    if (obj->type == DOWNLOAD_DIR) {\n        download_size = seaf_fs_manager_get_fs_size (seaf->fs_mgr,\n                                                     repo->store_id, repo->version,\n                                                     (char *)obj->internal);\n    } else {\n        download_size = calcuate_download_multi_size (repo, (GList *)obj->internal);\n    }\n\n    /* default is MB */\n    max_download_dir_size = seaf_cfg_manager_get_config_int64 (seaf->cfg_mgr, \"fileserver\",\n                                                               \"max_download_dir_size\");\n    if (max_download_dir_size > 0)\n        max_download_dir_size = max_download_dir_size * 1000000;\n    else\n        max_download_dir_size = DEFAULT_MAX_DOWNLOAD_DIR_SIZE;\n\n    if (download_size < 0) {\n        seaf_warning (\"Failed to get download size.\\n\");\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to get download size.\");\n        return FALSE;\n    } else if (download_size > max_download_dir_size) {\n        seaf_warning (\"Total download size %\"G_GINT64_FORMAT\n                      \", exceed max download dir size %\"G_GINT64_FORMAT\".\\n\",\n                      download_size, max_download_dir_size);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Download size exceed max download dir size.\");\n        return FALSE;\n    }\n\n    return TRUE;\n}\n\nstatic int\nget_download_file_count (DownloadObj *obj, GError **error)\n{\n    int file_count;\n    SeafRepo *repo = obj->repo;\n\n    if (obj->type == DOWNLOAD_DIR) {\n        file_count = seaf_fs_manager_count_fs_files (seaf->fs_mgr, repo->store_id,\n                                                     repo->version, (char *)obj->internal);\n    } 
else {\n        file_count = calcuate_download_multi_file_count (repo, (GList *)obj->internal);\n    }\n\n    if (file_count < 0) {\n        seaf_warning (\"Failed to get download file count.\\n\");\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to get download file count.\");\n        return -1;\n    }\n\n    return file_count;\n}\n\nint\nzip_download_mgr_start_zip_task (ZipDownloadMgr *mgr,\n                                 const char *token,\n                                 SeafileWebAccess *info,\n                                 GError **error)\n{\n    const char *repo_id;\n    const char *data;\n    const char *operation;\n    SeafRepo *repo;\n    DownloadObj *obj;\n    Progress *progress;\n    int ret = 0;\n    ZipDownloadMgrPriv *priv = mgr->priv;\n\n    repo_id = seafile_web_access_get_repo_id (info);\n    repo = seaf_repo_manager_get_repo (seaf->repo_mgr, repo_id);\n    if (!repo) {\n        seaf_warning (\"Failed to get repo %.8s.\\n\", repo_id);\n        g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                     \"Failed to get repo.\");\n        return -1;\n    }\n    data = seafile_web_access_get_obj_id (info);\n    operation = seafile_web_access_get_op (info);\n\n    obj = g_new0 (DownloadObj, 1);\n    obj->token = g_strdup (token);\n    obj->repo = repo;\n    obj->user = g_strdup (seafile_web_access_get_username (info));\n\n    if (strcmp (operation, \"download-dir\") == 0 ||\n        strcmp (operation, \"download-dir-link\") == 0) {\n        obj->type = DOWNLOAD_DIR;\n        ret = parse_download_dir_data (obj, data);\n        if (ret < 0) {\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                         \"Failed to parse download dir data.\");\n            goto out;\n        }\n        if (!seaf_fs_manager_object_exists (seaf->fs_mgr,\n                                            repo->store_id, repo->version,\n                                            
(char *)obj->internal)) {\n            seaf_warning (\"Dir %s doesn't exist.\\n\", (char *)obj->internal);\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                         \"Dir doesn't exist.\");\n            ret = -1;\n            goto out;\n        }\n    } else {\n        obj->type = DOWNLOAD_MULTI;\n        ret = parse_download_multi_data (obj, data);\n        if (ret < 0) {\n            g_set_error (error, SEAFILE_DOMAIN, SEAF_ERR_GENERAL,\n                         \"Failed to parse download multi data.\");\n            goto out;\n        }\n    }\n\n    progress = g_new0 (Progress, 1);\n    /* Set to real total in worker thread. Here to just prevent the client from thinking\n     * the zip has been finished too early.\n     */\n    progress->total = 1;\n    progress->expire_ts = time(NULL) + PROGRESS_TTL;\n    obj->progress = progress;\n\n    pthread_mutex_lock (&priv->progress_lock);\n    g_hash_table_replace (priv->progress_store, g_strdup (token), progress);\n    pthread_mutex_unlock (&priv->progress_lock);\n\n    g_thread_pool_push (priv->zip_tpool, obj, NULL);\n\nout:\n    if (ret < 0) {\n        free_download_obj (obj);\n    }\n\n    return ret;\n}\n\n/*\n#define TOKEN_LEN 36\nstatic char *\ngen_new_token (GHashTable *token_hash)\n{\n    char uuid[37];\n    char *token;\n\n    while (1) {\n        gen_uuid_inplace (uuid);\n        token = g_strndup(uuid, TOKEN_LEN);\n\n        // Make sure the new token doesn't conflict with an existing one.\n        if (g_hash_table_lookup (token_hash, token) != NULL)\n            g_free (token);\n        else\n            return token;\n    }\n}\n\nchar *\nzip_download_mgr_start_zip_task_v2 (ZipDownloadMgr *mgr,\n                                    const char *repo_id,\n                                    const char *operation,\n                                    const char *user,\n                                    GList *dirent_list)\n{\n    SeafRepo *repo = NULL;\n    char *token = 
NULL;\n    char *task_id = NULL;\n    char *filename = NULL;\n    DownloadObj *obj;\n    Progress *progress;\n    ZipDownloadMgrPriv *priv = mgr->priv;\n\n    repo = seaf_repo_manager_get_repo(seaf->repo_mgr, repo_id);\n    if (!repo) {\n        seaf_warning (\"Failed to get repo %s\\n\", repo_id);\n        return NULL;\n    }\n\n    obj = g_new0 (DownloadObj, 1);\n    obj->repo = repo;\n    obj->user = g_strdup (user);\n\n    if (strcmp (operation, \"download-dir\") == 0 ||\n        strcmp (operation, \"download-dir-link\") == 0) {\n        obj->type = DOWNLOAD_DIR;\n        SeafDirent *dent = dirent_list->data;\n        obj->dir_name = g_strdup (dent->name);\n        obj->internal = g_strdup (dent->id);\n        filename = g_strdup (obj->dir_name);\n        g_list_free_full (dirent_list, (GDestroyNotify)seaf_dirent_free);\n    } else {\n        obj->type = DOWNLOAD_MULTI;\n        obj->dir_name = g_strdup(\"\");\n        obj->internal = dirent_list;\n        time_t now = time(NULL);\n        char date_str[11];\n        strftime(date_str, sizeof(date_str), \"%Y-%m-%d\", localtime(&now));\n        filename = g_strconcat (MULTI_DOWNLOAD_FILE_PREFIX, date_str, NULL);\n    }\n\n    progress = g_new0 (Progress, 1);\n    // Set to real total in worker thread. 
Here to just prevent the client from thinking\n    // the zip has been finished too early.\n    progress->total = 1;\n    progress->expire_ts = time(NULL) + PROGRESS_TTL;\n    progress->zip_file_name = filename;\n    obj->progress = progress;\n\n    pthread_mutex_lock (&priv->progress_lock);\n    token = gen_new_token (priv->progress_store);\n    g_hash_table_replace (priv->progress_store, token, progress);\n    pthread_mutex_unlock (&priv->progress_lock);\n    obj->token = g_strdup (token);\n    task_id = g_strdup (token);\n\n    g_thread_pool_push (priv->zip_tpool, obj, NULL);\n\n    return task_id;\n}\n*/\n\nstatic Progress *\nget_progress_obj (ZipDownloadMgrPriv *priv, const char *token)\n{\n    Progress *progress;\n\n    pthread_mutex_lock (&priv->progress_lock);\n    progress = g_hash_table_lookup (priv->progress_store, token);\n    pthread_mutex_unlock (&priv->progress_lock);\n\n    return progress;\n}\n\nchar *\nzip_download_mgr_query_zip_progress (ZipDownloadMgr *mgr,\n                                     const char *token, GError **error)\n{\n    Progress *progress;\n    json_t *obj;\n    char *info;\n\n    progress = get_progress_obj (mgr->priv, token);\n    if (!progress)\n        return NULL;\n\n    obj = json_object ();\n    json_object_set_int_member (obj, \"zipped\", g_atomic_int_get (&progress->zipped));\n    json_object_set_int_member (obj, \"total\", progress->total);\n    if (progress->size_too_large) {\n        json_object_set_int_member (obj, \"failed\", 1);\n        json_object_set_string_member (obj, \"failed_reason\", \"size too large\");\n    } else if (progress->internal_error) {\n        json_object_set_int_member (obj, \"failed\", 1);\n        json_object_set_string_member (obj, \"failed_reason\", \"internal error\");\n    } else {\n        json_object_set_int_member (obj, \"failed\", 0);\n        json_object_set_string_member (obj, \"failed_reason\", \"\");\n    }\n    if (progress->canceled)\n        json_object_set_int_member (obj, 
\"canceled\", 1);\n    else\n        json_object_set_int_member (obj, \"canceled\", 0);\n    \n    if (progress->size_too_large || progress->canceled || progress->internal_error)\n        remove_progress_by_token(mgr->priv, token);\n        \n    info = json_dumps (obj, JSON_COMPACT);\n    json_decref (obj);\n\n    return info;\n}\n\nchar *\nzip_download_mgr_get_zip_file_path (struct ZipDownloadMgr *mgr,\n                                    const char *token)\n{\n    Progress *progress;\n\n    progress = get_progress_obj (mgr->priv, token);\n    if (!progress) {\n        return NULL;\n    }\n    return progress->zip_file_path;\n}\n\n/*\nchar *\nzip_download_mgr_get_zip_file_name (struct ZipDownloadMgr *mgr,\n                                    const char *token)\n{\n    Progress *progress;\n\n    progress = get_progress_obj (mgr->priv, token);\n    if (!progress) {\n        return NULL;\n    }\n    return progress->zip_file_name;\n}\n*/\n\nvoid\nzip_download_mgr_del_zip_progress (ZipDownloadMgr *mgr,\n                                   const char *token)\n{\n    remove_progress_by_token (mgr->priv, token);\n}\n\nint\nzip_download_mgr_cancel_zip_task (ZipDownloadMgr *mgr,\n                                  const char *token)\n{\n    Progress *progress = get_progress_obj (mgr->priv, token);\n    if (progress)\n        progress->canceled = TRUE;\n\n    return 0;\n}\n#endif\n"
  },
  {
    "path": "server/zip-download-mgr.h",
    "content": "#ifndef ZIP_DOWNLOAD_MGR_H\n#define ZIP_DOWNLOAD_MGR_H\n\n#ifdef HAVE_EVHTP\n\n#include \"seafile-object.h\"\n\n#define MULTI_DOWNLOAD_FILE_PREFIX \"documents-export-\"\n\nstruct ZipDownloadMgrPriv;\n\ntypedef struct ZipDownloadMgr {\n    struct ZipDownloadMgrPriv *priv;\n} ZipDownloadMgr;\n\nZipDownloadMgr *\nzip_download_mgr_new ();\n\nint\nzip_download_mgr_start_zip_task (ZipDownloadMgr *mgr,\n                                 const char *token,\n                                 SeafileWebAccess *info,\n                                 GError **error);\n\nchar *\nzip_download_mgr_start_zip_task_v2 (ZipDownloadMgr *mgr,\n                                    const char *repo_id,\n                                    const char *operation,\n                                    const char *user,\n                                    GList *dirent_list);\n\nchar *\nzip_download_mgr_query_zip_progress (ZipDownloadMgr *mgr,\n                                     const char *token, GError **error);\n\nchar *\nzip_download_mgr_get_zip_file_path (ZipDownloadMgr *mgr,\n                                    const char *token);\n\nchar *\nzip_download_mgr_get_zip_file_name (ZipDownloadMgr *mgr,\n                                    const char *token);\n\nvoid\nzip_download_mgr_del_zip_progress (ZipDownloadMgr *mgr,\n                                   const char *token);\n\nint\nzip_download_mgr_cancel_zip_task (ZipDownloadMgr *mgr,\n                                  const char *token);\n#endif\n\n#endif\n"
  },
  {
    "path": "tests/__init__.py",
    "content": ""
  },
  {
    "path": "tests/conf/ccnet.conf",
    "content": "[General]\nUSER_NAME = server\nID = 8e4b13b49ca79f35732d9f44a0804940d985627c\nNAME = server\nSERVICE_URL = http://127.0.0.1\n\n[Network]\nPORT = 10002\n\n[Client]\nPORT = 9999\n\n[Database]\nCREATE_TABLES = true\nENGINE = mysql\nHOST = 127.0.0.1\nUSER = seafile\nPASSWD = seafile\nDB = ccnet_db\nCONNECTION_CHARSET=utf8\n\n#[Database]\n#ENGINE = mysql\n#HOST = 127.0.0.1\n#USER = seafile\n#PASSWD = root\n#DB = ccnet-db\n#CREATE_TABLES=true\n"
  },
  {
    "path": "tests/conf/mykey.peer",
    "content": "-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEAuZFwgxkKQGaqYyFMxIUz1JHnZPaOgEQ+fX/jRVYbGMiHkSbX\nK9X3XUHUGEjUt8b3zW6UZJGjgyV5S08YuaN0eE5z6Q6bnuWEhkTmgZgXaybc9Hiu\ny2WAHpKj+qbXcmewE0WEys/Ov9AIe0TRXmvL6r1793VcLSzgb/aIQA2WFg97DfEA\nhGAHo5BesKRfEEvXL6ZB9cGxXP9qIy0ObTvLXlOgbYchfV4rrXJk0u9xWjRyXABv\n2Myv3fgxmGmTR+TAw2G5GCKeh9IoIuWVMGPyjSlERGMqQYymNz3NgyWFayyZ5HQS\ntihCnflOGEiMHRkOwIczB16YZhan2YqKpsjHGwIBIwKCAQEArvbXzBBLfoyvR4XM\nCb9rYgXozOh3usQAZ7MYHM2HQ0C6VahHN/WgFhl+1RF4Gv1tTKoW4nqwHJEL9oxn\nxPkzTNxBZrYAcT7NaKdc/diLG+LQVDdFuHWkrxyL+vUUR0vR5kjcSjGlrYmhmMvb\nWQaNEIbFVwhA92TTnMPfjNmcI2wRKI1K9NEKDAMIPSwW/sgkls2h4KW3Y7DooJ0k\nl0apjN/rlaR4ohZp6oMVifW8GFY43Xau+4dIrYTnvvSyvGvtB+8cWuhqqvWHRZdM\nrFjgOJoZH5l0zxt2dYW2WFiqgT7xXsvu6L+nylXktEMxC33rehYdPrd427J409A6\ncaO5cwKBgQDyrBQ8UXu7cDAktiKTwH7+pA0wNyTvKsGYw0RcFILccpxty2r5gYhI\neLFPVyjoYxwauW6vX3cSAYLKR+2PlYvkPpEvBQIJbaurx++ejez/KxYD65ZeFTfs\nKb9A08hgMxCvJmnRvojhez1OZmmmWYPT57XeZXnCiNoyJWKA0mMNvwKBgQDDwn02\no5n7ugetXIlV1PiStVogPPTBobh9jsXooQFh4fB+lsrO082hapMlbVVNG1gLzvTY\nV0oDM/AzdnC6feZlAEdM+IcruinVnMnbnhiwPVDInCJIhvmJ/XScvkTsgHwRiAss\nTlf8wH/uGXiaeVV/KMlkKRK6h54znTPq37/VpQKBgQDkziG1NuJgRTS05j3bxB/3\nZ3omJV1Wh2YTsMtswuHIiVGpWWTcnrOyC2VZb2+2iVUDQR83oycfmwZJsYg27BYu\n+SnNPzxvSiWEtTJiS00rGf7QfwoeMUNbAspEb+jPux5b/6WZ34hfkXRRO/02cagu\nMj3DDzhJtDtxG+8pAOEM9QKBgQC+KqWFiPv72UlJUpQKPJmzFpIQsD44cTbgXs7h\n+32viwbhX0irqS4nxp2SEnAfBJ6sYqS05xSyp3uftOKJRxpTfJ0I8W1drYe5kP6a\n1Bf7qUcpRzc/JAhaKWn3Wb9MJQrPM7MVGOfCVJmINgAhCCcrEa2xwX/oZnxsp1cB\na6RpIwKBgQDW15IebNwVOExTqtfh6UvIjMSrk9OoHDyjoPLI3eyPt3ujKdXFJ8qF\nCWg9ianQyE5Y8vfDI+x1YRCOwq2WapeXzkSO8CzVFHgz5kFqJQolr4+o6wr5mLLC\n+6iW9u81/X3bMAWshtNfsWbRSFLT1WNVTKRg+xO7YG/3wcyeIeqigA==\n-----END RSA PRIVATE KEY-----\n"
  },
  {
    "path": "tests/config.py",
    "content": "USER = 'testuser@test.seafile.com'\nPASSWORD = 'testuser'\nUSER2 = 'testuser2@test.seafile.com'\nPASSWORD2 = 'testuser2'\nADMIN_USER = 'adminuser@test.seafile.com'\nADMIN_PASSWORD = 'adminuser'\n\nINACTIVE_USER = 'inactiveuser@test.seafile.com'\nINACTIVE_PASSWORD = 'inactiveuser'\n"
  },
  {
    "path": "tests/conftest.py",
    "content": "#coding: UTF-8\n\nimport logging\nimport os\n\nimport pytest\nfrom tenacity import retry, stop_after_attempt, wait_fixed\nfrom tests.config import (\n    ADMIN_PASSWORD, ADMIN_USER, INACTIVE_PASSWORD, INACTIVE_USER, PASSWORD,\n    PASSWORD2, USER, USER2\n)\nfrom tests.utils import create_and_get_repo, randstring, create_and_get_group\n\nfrom seaserv import ccnet_api, seafile_api\n\nlogger = logging.getLogger(__name__)\n\n\n@retry(wait=wait_fixed(2), stop=stop_after_attempt(10))\ndef wait_for_server():\n    seafile_api.get_repo_list(0, 1, None)\n\n\n@pytest.fixture(scope='session', autouse=True)\ndef create_users():\n    \"\"\"\n    Create an admin user and a normal user\n    \"\"\"\n    wait_for_server()\n    logger.info('preparing users for testing')\n    ccnet_api.add_emailuser(USER, PASSWORD, is_staff=False, is_active=True)\n    ccnet_api.add_emailuser(USER2, PASSWORD2, is_staff=False, is_active=True)\n    ccnet_api.add_emailuser(\n        INACTIVE_USER, INACTIVE_PASSWORD, is_staff=False, is_active=False\n    )\n    ccnet_api.add_emailuser(\n        ADMIN_USER, ADMIN_PASSWORD, is_staff=True, is_active=True\n    )\n\n@pytest.yield_fixture(scope='function')\ndef encrypted_repo():\n    repo = create_and_get_repo(\n        'test_repo_{}'.format(randstring(10)), '', USER, passwd='123'\n    )\n    try:\n        seafile_api.post_dir(repo.id, '/', 'dir1', USER)\n        seafile_api.post_dir(repo.id, '/', 'dir2', USER)\n        seafile_api.post_dir(repo.id, '/dir1', 'subdir1', USER)\n        seafile_api.post_dir(repo.id, '/dir2', 'subdir2', USER)\n        yield repo\n    finally:\n        if seafile_api.get_repo(repo.id):\n            # The repo may be deleted in the test case\n            seafile_api.remove_repo(repo.id)\n\n@pytest.yield_fixture(scope='function')\ndef repo():\n    repo = create_and_get_repo(\n        'test_repo_{}'.format(randstring(10)), '', USER, passwd=None\n    )\n    try:\n        seafile_api.post_dir(repo.id, '/', 'dir1', USER)\n   
     seafile_api.post_dir(repo.id, '/', 'dir2', USER)\n        yield repo\n    finally:\n        if seafile_api.get_repo(repo.id):\n            # The repo may be deleted in the test case\n            seafile_api.remove_repo(repo.id)\n\n@pytest.yield_fixture(scope='function')\ndef group():\n    group = create_and_get_group(\n            'test_group_{}'.format(randstring(10)), USER, gtype=None\n    )\n    try:\n        yield group\n    finally:\n        if ccnet_api.get_group(group.id):\n            ccnet_api.remove_group(group.id)\n"
  },
  {
    "path": "tests/test_file_operation/test_file_operation.py",
    "content": "import pytest\nimport os\nimport time\nimport json\nfrom tests.config import USER\nfrom seaserv import seafile_api as api\n\nfile_name = 'test.txt'\nnew_file_name = 'new_test.txt'\nnew_file_name_2 = 'new_test_2.txt'\nempty_file_name = 'empty_test.txt'\nnew_empty_file_name = 'new_empty_test.txt'\nfile_content = 'test file content'\nfile_path = os.getcwd() + '/' + file_name\ndir_name = \"test_dir\"\n\ndef create_the_file ():\n    with open(file_path, 'w') as fp:\n        fp.write(file_content)\n\n@pytest.mark.parametrize('in_batch',\n                         [True, False])\ndef test_file_operation(in_batch):\n    t_repo_version = 1\n    t_repo_id1 = api.create_repo('test_file_operation1', '', USER, passwd = None)\n\n    create_the_file()\n\n    # test post_file\n    assert api.post_file(t_repo_id1, file_path, '/', file_name, USER) == 0\n    t_file_id = api.get_file_id_by_path(t_repo_id1, '/' + file_name)\n    t_file_size = len(file_content)\n    assert t_file_size == api.get_file_size(t_repo_id1, t_repo_version, t_file_id)\n\n    # test post_dir\n    assert api.post_dir(t_repo_id1, '/', dir_name, USER) == 0\n\n    # test copy_file (synchronize)\n    t_copy_file_result1 = api.copy_file(t_repo_id1, '/', '[\\\"'+file_name+'\\\"]', t_repo_id1, '/', '[\\\"'+new_file_name+'\\\"]', USER, 0, 1)\n    assert t_copy_file_result1\n    assert t_copy_file_result1.task_id is None\n    assert not t_copy_file_result1.background\n    t_file_id = api.get_file_id_by_path(t_repo_id1, '/' + new_file_name)\n    assert t_file_size == api.get_file_size(t_repo_id1, t_repo_version, t_file_id)\n\n    # test copy_file (asynchronous)\n    t_repo_id2 = api.create_repo('test_file_operation2', '', USER, passwd = None)\n    usage = api.get_user_self_usage (USER)\n    api.set_user_quota(USER, usage + 1);\n    t_copy_file_result2 = api.copy_file(t_repo_id1, '/', '[\\\"'+file_name+'\\\"]', t_repo_id2, '/', '[\\\"'+file_name+'\\\"]', USER, 1, 0)\n    assert t_copy_file_result2\n    assert 
t_copy_file_result2.background\n    while True:\n        time.sleep(0.1)\n        t_copy_task = api.get_copy_task(t_copy_file_result2.task_id)\n        assert t_copy_task.failed\n        assert t_copy_task.failed_reason == 'Quota is full'\n        if t_copy_task.failed:\n            break;\n\n    api.set_user_quota(USER, -1);\n    t_copy_file_result2 = api.copy_file(t_repo_id1, '/', '[\\\"'+file_name+'\\\"]', t_repo_id2, '/', '[\\\"'+file_name+'\\\"]', USER, 1, 0)\n    assert t_copy_file_result2\n    assert t_copy_file_result2.task_id\n    assert t_copy_file_result2.background\n    while True:\n        time.sleep(0.1)\n        t_copy_task = api.get_copy_task(t_copy_file_result2.task_id)\n        if t_copy_task.successful:\n            break;\n    t_file_id = api.get_file_id_by_path(t_repo_id2, '/' + file_name)\n    assert t_file_size == api.get_file_size(t_repo_id2, t_repo_version, t_file_id)\n\n    # test move_file (synchronize)\n    t_move_file_info1 = api.get_dirent_by_path(t_repo_id1, '/' + new_file_name)\n    t_move_file_result1 = api.move_file(t_repo_id1, '/', '[\\\"'+new_file_name+'\\\"]', t_repo_id1, '/' + dir_name, '[\\\"'+new_file_name+'\\\"]', 1, USER, 0, 1)\n    assert t_move_file_result1\n    t_move_file_info2 = api.get_dirent_by_path(t_repo_id1, '/' + dir_name + '/' + new_file_name)\n    assert t_move_file_info1.mtime == t_move_file_info2.mtime\n    t_file_id = api.get_file_id_by_path(t_repo_id1, '/' + new_file_name)\n    assert t_file_id is None\n\n    # test move_file (synchronize)\n    t_move_file_result1 = api.move_file(t_repo_id1, '/' + dir_name, '[\\\"'+new_file_name+'\\\"]', t_repo_id1, '/', '[\\\"'+new_file_name_2+'\\\"]', 1, USER, 0, 1)\n    assert t_move_file_result1\n    t_file_id = api.get_file_id_by_path(t_repo_id1, '/' + dir_name + '/' + new_file_name)\n    assert t_file_id is None\n\n    # test move_file (asynchronous)\n    usage = api.get_user_self_usage (USER)\n    api.set_user_quota(USER, usage + 1);\n    t_move_file_result2 = 
api.move_file(t_repo_id1, '/', '[\\\"'+file_name+'\\\"]', t_repo_id2, '/' , '[\\\"'+new_file_name+'\\\"]', 1, USER, 1, 0)\n    assert t_move_file_result2\n    assert t_move_file_result2.task_id\n    assert t_move_file_result2.background\n    while True:\n        time.sleep(0.1)\n        t_move_task = api.get_copy_task(t_move_file_result2.task_id)\n        assert t_move_task.failed\n        assert t_move_task.failed_reason == 'Quota is full'\n        if t_move_task.failed:\n            break\n\n    api.set_user_quota(USER, -1);\n    t_move_file_result2 = api.move_file(t_repo_id1, '/', '[\\\"'+file_name+'\\\"]', t_repo_id2, '/' , '[\\\"'+new_file_name+'\\\"]', 1, USER, 1, 0)\n    assert t_move_file_result2\n    assert t_move_file_result2.task_id\n    assert t_move_file_result2.background\n    while True:\n        time.sleep(0.1)\n        t_move_task = api.get_copy_task(t_move_file_result2.task_id)\n        if t_move_task.successful:\n            break\n    t_file_id = api.get_file_id_by_path(t_repo_id2, '/' + new_file_name)\n    assert t_file_size == api.get_file_size(t_repo_id2, t_repo_version, t_file_id)\n\n    # test post_empty_file\n    assert api.post_empty_file(t_repo_id1, '/' + dir_name, empty_file_name, USER) == 0\n    t_file_id = api.get_file_id_by_path(t_repo_id1, '/' + dir_name + '/' + empty_file_name)\n    assert api.get_file_size(t_repo_id1, t_repo_version, t_file_id) == 0\n\n    # test rename_file\n    assert api.rename_file(t_repo_id1, '/' + dir_name, empty_file_name, new_empty_file_name, USER) == 0\n\n    #test put_file\n    t_new_file_id = api.put_file(t_repo_id1, file_path, '/' + dir_name, new_empty_file_name, USER, None)\n    assert t_new_file_id\n\n    # test get_file_revisions\n    t_commit_list = api.get_file_revisions(t_repo_id2, None, '/' + file_name, 2)\n    assert t_commit_list\n    assert len(t_commit_list) == 2\n    assert t_commit_list[0].creator_name == USER\n    \n    # test del_file\n    if in_batch:\n        assert 
api.batch_del_files(t_repo_id2, '[\\\"'+'/'+file_name+'\\\"]', USER) == 0\n    else:\n        assert api.del_file(t_repo_id2, '/', '[\\\"'+file_name+'\\\"]', USER) == 0\n\n    # test get_deleted\n    t_deleted_file_list = api.get_deleted(t_repo_id2, 1)\n    assert t_deleted_file_list\n    assert len(t_deleted_file_list) == 2\n    assert t_deleted_file_list[0].obj_name == file_name\n    assert t_deleted_file_list[0].basedir == '/'\n\n    # test del a non-exist file. should return 0.\n    if in_batch:\n        file_list = [\"/\"+file_name, \"/\"+new_file_name]\n        assert api.batch_del_files(t_repo_id2, json.dumps(file_list), USER) == 0\n        t_deleted_file_list = api.get_deleted(t_repo_id2, 1)\n        assert t_deleted_file_list\n        assert len(t_deleted_file_list) == 3\n\n        file_list = [\"/\"+dir_name+\"/\"+new_empty_file_name, \"/\"+dir_name+\"/\"+new_file_name, \"/\"+new_file_name_2]\n        assert api.batch_del_files(t_repo_id1, json.dumps(file_list), USER) == 0\n        t_deleted_file_list = api.get_deleted(t_repo_id1, 1)\n        assert t_deleted_file_list\n        assert len(t_deleted_file_list) == 4\n    else:\n        assert api.del_file(t_repo_id2, '/', '[\\\"'+file_name+'\\\"]', USER) == 0\n\n        assert api.del_file(t_repo_id1, '/' + dir_name, '[\\\"'+new_empty_file_name+'\\\"]', USER) == 0\n        assert api.del_file(t_repo_id1, '/' + dir_name, '[\\\"'+new_file_name+'\\\"]', USER) == 0\n        assert api.del_file(t_repo_id2, '/', '[\\\"'+new_file_name+'\\\"]', USER) == 0\n        assert api.del_file(t_repo_id1, '/', '[\\\"'+new_file_name_2+'\\\"]', USER) == 0\n\n    time.sleep(1)\n    api.remove_repo(t_repo_id1)\n    api.remove_repo(t_repo_id2)\n"
  },
  {
    "path": "tests/test_file_operation/test_merge_virtual_repo.py",
    "content": "import pytest\nimport requests\nimport os\nimport time\nfrom tests.config import USER, USER2\nfrom seaserv import seafile_api as api\nfrom requests_toolbelt import MultipartEncoder\n\nfile_name = 'file.txt'\nfile_name_not_replaced = 'file (1).txt'\nfile_path = os.getcwd() + '/' + file_name\nfile_content = 'File content.\\r\\n'\nfile_size = len(file_content)\n\nresumable_file_name = 'resumable.txt'\nresumable_test_file_name = 'test/resumable.txt'\nchunked_part1_name = 'part1.txt'\nchunked_part2_name = 'part2.txt'\nchunked_part1_path = os.getcwd() + '/' + chunked_part1_name\nchunked_part2_path = os.getcwd() + '/' + chunked_part2_name\nchunked_part1_content = 'First line.\\r\\n'\nchunked_part2_content = 'Second line.\\r\\n'\ntotal_size = len(chunked_part1_content) + len(chunked_part2_content)\n\n#File_id is not used when upload files, but\n#the argument obj_id of get_fileserver_access_token shouldn't be NULL.\nfile_id = '0000000000000000000000000000000000000000'\n\ndef create_test_file():\n    fp = open(file_path, 'w')\n    fp.close()\n    fp = open(chunked_part1_path, 'w')\n    fp.close()\n    fp = open(chunked_part2_path, 'w')\n    fp.close()\n\ndef create_test_dir(repo, dir_name):\n    parent_dir = '/'\n    api.post_dir(repo.id,parent_dir,dir_name,USER)\n\ndef assert_upload_response(response, replace, file_exist):\n    assert response.status_code == 200\n    response_json = response.json()\n    assert response_json[0]['size'] == 0\n    assert response_json[0]['id'] == file_id\n    if file_exist and not replace:\n        assert response_json[0]['name'] == file_name_not_replaced\n    else:\n        assert response_json[0]['name'] == file_name\n\ndef assert_resumable_upload_response(response, repo_id, file_name, upload_complete):\n    assert response.status_code == 200\n    if not upload_complete:\n        assert response.text == '{\"success\": true}'\n        offset = api.get_upload_tmp_file_offset(repo_id, '/' + file_name)\n        assert offset == 
len(chunked_part1_content)\n    else:\n        response_json = response.json()\n        assert response_json[0]['size'] == total_size\n        new_file_id = response_json[0]['id']\n        assert len(new_file_id) == 40 and new_file_id != file_id\n        assert response_json[0]['name'] == resumable_file_name\n\ndef assert_update_response(response, is_json):\n    assert response.status_code == 200\n    if is_json:\n        response_json = response.json()\n        assert response_json[0]['size'] == file_size\n        new_file_id = response_json[0]['id']\n        assert len(new_file_id) == 40 and new_file_id != file_id\n        assert response_json[0]['name'] == file_name\n    else:\n        new_file_id = response.text\n        assert len(new_file_id) == 40 and new_file_id != file_id\n\ndef request_resumable_upload(filepath, headers,upload_url_base,parent_dir,is_ajax):\n    write_file(chunked_part1_path, chunked_part1_content)\n    write_file(chunked_part2_path, chunked_part2_content)\n\n    m = MultipartEncoder(\n            fields={\n                    'parent_dir': parent_dir,\n                    'file': (resumable_file_name, open(filepath, 'rb'), 'application/octet-stream')\n            })\n    params = {'ret-json':'1'}\n    headers[\"Content-type\"] = m.content_type\n    if is_ajax:\n        response = requests.post(upload_url_base, headers = headers,\n                             data = m)\n    else:\n        response = requests.post(upload_url_base, headers = headers,\n                             data = m, params = params)\n    return response\n\ndef write_file(file_path, file_content):\n    fp = open(file_path, 'w')\n    fp.write(file_content)\n    fp.close()\n\ndef del_local_files():\n    os.remove(file_path)\n    os.remove(chunked_part1_path)\n    os.remove(chunked_part2_path)\n\ndef test_merge_virtual_repo(repo):\n    api.post_dir(repo.id, '/dir1', 'subdir1', USER)\n    api.post_dir(repo.id, '/dir2', 'subdir2', USER)\n    v_repo_id = 
api.share_subdir_to_user(repo.id, '/dir1', USER, USER2, 'rw')\n\n    create_test_file()\n    params = {'ret-json':'1'}\n    obj_id = '{\"parent_dir\":\"/\"}'\n    create_test_dir(repo,'test')\n\n    #test upload file to vritual repo root dir.\n    token = api.get_fileserver_access_token(v_repo_id, obj_id, 'upload', USER2, False)\n    upload_url_base = 'http://127.0.0.1:8082/upload-api/' + token\n    m = MultipartEncoder(\n            fields={\n                    'parent_dir': '/',\n                    'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')\n            })\n    response = requests.post(upload_url_base, params = params,\n                             data = m, headers = {'Content-Type': m.content_type})\n    assert_upload_response(response, False, False)\n\n    time.sleep (1.5)\n    repo_size = api.get_repo_size (v_repo_id)\n    assert repo_size == 0\n\n    time.sleep (1.5)\n    repo_size = api.get_repo_size (repo.id)\n    assert repo_size == 0\n\n    #test resumable upload file to virtual repo root dir\n    parent_dir = '/'\n    headers = {'Content-Range':'bytes 0-{}/{}'.format(str(len(chunked_part1_content) - 1),\n                                                      str(total_size)),\n               'Content-Disposition':'attachment; filename=\\\"{}\\\"'.format(resumable_file_name)}\n    response = request_resumable_upload(chunked_part1_path,headers, upload_url_base,parent_dir, False)\n    assert_resumable_upload_response(response, v_repo_id,\n                                     resumable_file_name, False)\n\n    time.sleep (1.5)\n    v_repo_size = api.get_repo_size (v_repo_id)\n    assert v_repo_size == 0\n    time.sleep (1.5)\n    repo_size = api.get_repo_size (repo.id)\n    assert repo_size == 0\n\n    headers = {'Content-Range':'bytes {}-{}/{}'.format(str(len(chunked_part1_content)),\n                                                       str(total_size - 1),\n                                                       
str(total_size)),\n               'Content-Disposition':'attachment; filename=\\\"{}\\\"'.format(resumable_file_name)}\n    response = request_resumable_upload(chunked_part2_path, headers, upload_url_base, parent_dir, False)\n    assert_resumable_upload_response(response, v_repo_id,\n                                     resumable_file_name, True)\n\n    time.sleep (2.5)\n    v_repo_size = api.get_repo_size (v_repo_id)\n    assert v_repo_size == total_size\n    time.sleep (1.5)\n    repo_size = api.get_repo_size (repo.id)\n    assert repo_size == total_size\n\n    #test update file to virtual repo.\n    write_file(file_path, file_content)\n    token = api.get_fileserver_access_token(v_repo_id, obj_id, 'update', USER2, False)\n    update_url_base = 'http://127.0.0.1:8082/update-api/' + token\n    m = MultipartEncoder(\n            fields={\n                    'target_file': '/' + file_name,\n                    'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')\n            })\n    response = requests.post(update_url_base,\n                             data = m, headers = {'Content-Type': m.content_type})\n    assert_update_response(response, False)\n\n    time.sleep (1.5)\n    v_repo_size = api.get_repo_size (v_repo_id)\n    assert v_repo_size == total_size + file_size\n    time.sleep (1.5)\n    repo_size = api.get_repo_size (repo.id)\n    assert repo_size == total_size + file_size\n\n    api.del_file(v_repo_id, '/', '[\\\"'+file_name+'\\\"]', USER2)\n\n    time.sleep (1.5)\n    v_repo_size = api.get_repo_size (v_repo_id)\n    assert v_repo_size == total_size\n    time.sleep (1.5)\n    repo_size = api.get_repo_size (repo.id)\n    assert repo_size == total_size\n\n    api.del_file(v_repo_id, '/', '[\\\"'+resumable_file_name+'\\\"]', USER2)\n\n    time.sleep (1.5)\n    v_repo_size = api.get_repo_size (v_repo_id)\n    assert v_repo_size == 0\n    time.sleep (1.5)\n    repo_size = api.get_repo_size (repo.id)\n    assert repo_size == 0\n\n    
api.del_file(repo.id, '/dir1', '[\\\"subdir1\\\"]', USER)\n    api.del_file(repo.id, '/dir2', '[\\\"subdir1\\\"]', USER)\n    assert api.unshare_subdir_for_user(repo.id, '/dir1', USER, USER2) == 0\n    del_local_files()\n"
  },
  {
    "path": "tests/test_file_operation/test_search_files.py",
    "content": "import pytest\nimport os\nimport time\nfrom tests.config import USER\nfrom seaserv import seafile_api as api\n\nfile_name = 'test.txt'\nfile_content = 'test file content'\nfile_path = os.getcwd() + '/' + file_name\ndir_name = \"test_dir\"\n\ndef create_the_file ():\n    with open(file_path, 'w') as fp:\n        fp.write(file_content)\n\ndef test_file_operation():\n    t_repo_version = 1\n    t_repo_id1 = api.create_repo('test_file_operation1', '', USER, passwd = None)\n\n    create_the_file()\n\n    assert api.post_file(t_repo_id1, file_path, '/', file_name, USER) == 0\n    assert api.post_dir(t_repo_id1, '/', dir_name, USER) == 0\n\n    #test search files\n    file_list = api.search_files (t_repo_id1, \"test\")\n    assert len(file_list) == 2\n    assert file_list[0].path == \"/test.txt\"\n    assert file_list[0].is_dir == False\n    assert file_list[1].path == \"/test_dir\"\n    assert file_list[1].is_dir == True\n\n    file_list = api.search_files (t_repo_id1, \"dir\")\n    assert len(file_list) == 1\n    assert file_list[0].path == \"/test_dir\"\n    assert file_list[0].is_dir == True\n\n    file_list = api.search_files (t_repo_id1, \"DiR\")\n    assert len(file_list) == 1\n    assert file_list[0].path == \"/test_dir\"\n    assert file_list[0].is_dir == True\n\n    api.remove_repo(t_repo_id1)\n"
  },
  {
    "path": "tests/test_file_operation/test_upload_and_update.py",
    "content": "import pytest\nimport requests\nimport os\nimport time\nfrom tests.config import USER\nfrom seaserv import seafile_api as api\nfrom requests_toolbelt import MultipartEncoder\n\nfile_name = 'file.txt'\nfile_name_not_replaced = 'file (1).txt'\nfile_path = os.getcwd() + '/' + file_name\nfile_content = 'File content.\\r\\n'\nfile_size = len(file_content)\n\nresumable_file_name = 'resumable.txt'\nresumable_test_file_name = 'test/resumable.txt'\nchunked_part1_name = 'part1.txt'\nchunked_part2_name = 'part2.txt'\nchunked_part1_path = os.getcwd() + '/' + chunked_part1_name\nchunked_part2_path = os.getcwd() + '/' + chunked_part2_name\nchunked_part1_content = 'First line.\\r\\n'\nchunked_part2_content = 'Second line.\\r\\n'\ntotal_size = len(chunked_part1_content) + len(chunked_part2_content)\n\n#File_id is not used when upload files, but\n#the argument obj_id of get_fileserver_access_token shouldn't be NULL.\nfile_id = '0000000000000000000000000000000000000000'\n\ndef create_test_file():\n    fp = open(file_path, 'w')\n    fp.close()\n    fp = open(chunked_part1_path, 'w')\n    fp.close()\n    fp = open(chunked_part2_path, 'w')\n    fp.close()\n\ndef create_test_dir(repo, dir_name):\n    parent_dir = '/'\n    api.post_dir(repo.id,parent_dir,dir_name,USER)\n\ndef assert_upload_response(response, replace, file_exist):\n    assert response.status_code == 200\n    response_json = response.json()\n    assert response_json[0]['size'] == 0\n    assert response_json[0]['id'] == file_id\n    if file_exist and not replace:\n        assert response_json[0]['name'] == file_name_not_replaced\n    else:\n        assert response_json[0]['name'] == file_name\n\ndef assert_resumable_upload_response(response, repo_id, file_name, upload_complete):\n    assert response.status_code == 200\n    if not upload_complete:\n        assert response.text == '{\"success\": true}'\n        offset = api.get_upload_tmp_file_offset(repo_id, '/' + file_name)\n        assert offset == 
len(chunked_part1_content)\n    else:\n        response_json = response.json()\n        assert response_json[0]['size'] == total_size\n        new_file_id = response_json[0]['id']\n        assert len(new_file_id) == 40 and new_file_id != file_id\n        assert response_json[0]['name'] == resumable_file_name\n\ndef assert_update_response(response, is_json):\n    assert response.status_code == 200\n    if is_json:\n        response_json = response.json()\n        assert response_json[0]['size'] == file_size\n        new_file_id = response_json[0]['id']\n        assert len(new_file_id) == 40 and new_file_id != file_id\n        assert response_json[0]['name'] == file_name\n    else:\n        new_file_id = response.text\n        assert len(new_file_id) == 40 and new_file_id != file_id\n\ndef request_resumable_upload(filepath, headers,upload_url_base,parent_dir,is_ajax):\n    write_file(chunked_part1_path, chunked_part1_content)\n    write_file(chunked_part2_path, chunked_part2_content)\n\n    m = MultipartEncoder(\n            fields={\n                    'parent_dir': parent_dir,\n                    'file': (resumable_file_name, open(filepath, 'rb'), 'application/octet-stream')\n            })\n    params = {'ret-json':'1'}\n    headers[\"Content-type\"] = m.content_type\n    if is_ajax:\n        response = requests.post(upload_url_base, headers = headers,\n                             data = m)\n    else:\n        response = requests.post(upload_url_base, headers = headers,\n                             data = m, params = params)\n    return response\n\ndef write_file(file_path, file_content):\n    fp = open(file_path, 'w')\n    fp.write(file_content)\n    fp.close()\n\ndef del_repo_files(repo_id):\n    api.del_file(repo_id, '/', '[\\\"'+file_name+'\\\"]', USER)\n    api.del_file(repo_id, '/', '[\\\"'+file_name_not_replaced+'\\\"]', USER)\n    api.del_file(repo_id, '/', '[\\\"subdir\\\"]', USER)\n    api.del_file(repo_id, '/', '[\\\"'+resumable_file_name+'\\\"]', 
USER)\n\ndef del_local_files():\n    os.remove(file_path)\n    os.remove(chunked_part1_path)\n    os.remove(chunked_part2_path)\n\ndef test_ajax(repo):\n    create_test_file()\n    create_test_dir(repo,'test')\n    obj_id = '{\"parent_dir\":\"/\"}'\n\n    #test upload file to test dir.\n    token = api.get_fileserver_access_token(repo.id, obj_id, 'upload', USER, False)\n    upload_url_base = 'http://127.0.0.1:8082/upload-aj/'+ token\n    m = MultipartEncoder(\n            fields={\n                    'parent_dir': '/test',\n                    'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')\n            })\n    response = requests.post(upload_url_base,\n                             data = m, headers = {'Content-Type': m.content_type})\n    assert response.status_code == 403\n\n    #test upload file to root dir.\n    token = api.get_fileserver_access_token(repo.id, obj_id, 'upload', USER, False)\n    upload_url_base = 'http://127.0.0.1:8082/upload-aj/'+ token\n    m = MultipartEncoder(\n            fields={\n                    'parent_dir': '/',\n                    'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')\n            })\n    response = requests.post(upload_url_base,\n                             data = m, headers = {'Content-Type': m.content_type})\n    assert_upload_response(response, False, False)\n\n    time.sleep (1.5)\n    repo_size = api.get_repo_size (repo.id)\n    assert repo_size == 0\n\n    #test upload file to test dir when file already exists.\n    m = MultipartEncoder(\n            fields={\n                    'parent_dir': '/test',\n                    'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')\n            })\n    response = requests.post(upload_url_base,\n                             data = m, headers = {'Content-Type': m.content_type})\n    assert response.status_code == 403\n\n    #test upload file to root dir when file already exists.\n    m = MultipartEncoder(\n 
           fields={\n                    'parent_dir': '/',\n                    'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')\n            })\n    response = requests.post(upload_url_base,\n                             data = m, headers = {'Content-Type': m.content_type})\n    assert_upload_response(response, False, True)\n\n    time.sleep (1.5)\n    repo_size = api.get_repo_size (repo.id)\n    assert repo_size == 0\n\n    #test upload file to subdir whose parent is test dir.\n    m = MultipartEncoder(\n            fields={\n                    'parent_dir': '/test',\n                    'relative_path':'subdir',\n                    'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')\n            })\n    response = requests.post(upload_url_base,\n                             data = m, headers = {'Content-Type': m.content_type})\n    assert response.status_code == 403\n    #test upload file to subdir whose parent is root dir.\n    m = MultipartEncoder(\n            fields={\n                    'parent_dir': '/',\n                    'relative_path':'subdir',\n                    'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')\n            })\n    response = requests.post(upload_url_base,\n                             data = m, headers = {'Content-Type': m.content_type})\n    assert_upload_response(response, False, False)\n\n    time.sleep (1.5)\n    repo_size = api.get_repo_size (repo.id)\n    assert repo_size == 0\n\n    #test upload file to subdir whose parent is test dir when file already exists.\n    m = MultipartEncoder(\n            fields={\n                    'parent_dir': '/test',\n                    'relative_path':'subdir',\n                    'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')\n            })\n    response = requests.post(upload_url_base,\n                             data = m, headers = {'Content-Type': m.content_type})\n    assert 
response.status_code == 403\n\n    #test upload file to subdir whose parent is root dir when file already exists.\n    m = MultipartEncoder(\n            fields={\n                    'parent_dir': '/',\n                    'relative_path':'subdir',\n                    'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')\n            })\n    response = requests.post(upload_url_base,\n                             data = m, headers = {'Content-Type': m.content_type})\n    assert_upload_response(response, False, True)\n\n    time.sleep (1.5)\n    repo_size = api.get_repo_size (repo.id)\n    assert repo_size == 0\n\n    #test resumable upload file to test dir\n    parent_dir = '/test'\n    headers = {'Content-Range':'bytes 0-{}/{}'.format(str(len(chunked_part1_content) - 1),\n                                                      str(total_size)),\n               'Content-Disposition':'attachment; filename=\\\"{}\\\"'.format(resumable_file_name)}\n    response = request_resumable_upload(chunked_part1_path, headers, upload_url_base, parent_dir, True)\n    assert_resumable_upload_response(response, repo.id,\n                                     resumable_test_file_name, False)\n\n    headers = {'Content-Range':'bytes {}-{}/{}'.format(str(len(chunked_part1_content)),\n                                                       str(total_size - 1),\n                                                       str(total_size)),\n               'Content-Disposition':'attachment; filename=\\\"{}\\\"'.format(resumable_file_name)}\n    response = request_resumable_upload(chunked_part2_path, headers, upload_url_base, parent_dir, True)\n    assert response.status_code == 403\n\n    #test resumable upload file to root dir\n    parent_dir = '/'\n    headers = {'Content-Range':'bytes 0-{}/{}'.format(str(len(chunked_part1_content) - 1),\n                                                      str(total_size)),\n               'Content-Disposition':'attachment; 
filename=\\\"{}\\\"'.format(resumable_file_name)}\n    response = request_resumable_upload(chunked_part1_path, headers, upload_url_base, parent_dir, True)\n    assert_resumable_upload_response(response, repo.id,\n                                     resumable_file_name, False)\n\n    repo_size = api.get_repo_size (repo.id)\n    assert repo_size == 0\n\n    headers = {'Content-Range':'bytes {}-{}/{}'.format(str(len(chunked_part1_content)),\n                                                       str(total_size - 1),\n                                                       str(total_size)),\n               'Content-Disposition':'attachment; filename=\\\"{}\\\"'.format(resumable_file_name)}\n    response = request_resumable_upload(chunked_part2_path, headers, upload_url_base, parent_dir, True)\n    assert_resumable_upload_response(response, repo.id,\n                                     resumable_file_name, True)\n\n    time.sleep (2)\n    repo_size = api.get_repo_size (repo.id)\n    assert repo_size == total_size\n\n    #test update file.\n    write_file(file_path, file_content)\n    token = api.get_fileserver_access_token(repo.id, obj_id, 'update', USER, False)\n    update_url_base = 'http://127.0.0.1:8082/update-aj/' + token\n    m = MultipartEncoder(\n            fields={\n                    'target_file': '/' + file_name,\n                    'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')\n            })\n    response = requests.post(update_url_base,\n                             data = m, headers = {'Content-Type': m.content_type})\n    assert_update_response(response, True)\n\n    time.sleep (1.5)\n    repo_size = api.get_repo_size (repo.id)\n    assert repo_size == total_size + file_size\n\n    time.sleep(1)\n    del_repo_files(repo.id)\n    del_local_files()\n\ndef test_api(repo):\n    create_test_file()\n    params = {'ret-json':'1'}\n    obj_id = '{\"parent_dir\":\"/\"}'\n    create_test_dir(repo,'test')\n    #test upload file to test 
dir instead of  root dir.\n    token = api.get_fileserver_access_token(repo.id, obj_id, 'upload', USER, False)\n    upload_url_base = 'http://127.0.0.1:8082/upload-api/' + token\n    m = MultipartEncoder(\n            fields={\n                    'parent_dir': '/test',\n                    'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')\n            })\n    response = requests.post(upload_url_base, params = params,\n                             data = m, headers = {'Content-Type': m.content_type})\n    assert response.status_code == 403\n\n    #test upload file to root dir.\n    params = {'ret-json':'1'}\n    token = api.get_fileserver_access_token(repo.id, obj_id, 'upload', USER, False)\n    upload_url_base = 'http://127.0.0.1:8082/upload-api/' + token\n    m = MultipartEncoder(\n            fields={\n                    'parent_dir': '/',\n                    'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')\n            })\n    response = requests.post(upload_url_base, params = params,\n                             data = m, headers = {'Content-Type': m.content_type})\n    assert_upload_response(response, False, False)\n\n    time.sleep (1.5)\n    repo_size = api.get_repo_size (repo.id)\n    assert repo_size == 0\n\n    #test upload file to test dir instead of root dir when file already exists and replace is set.\n    params = {'ret-json':'1'}\n    m = MultipartEncoder(\n            fields={\n                    'parent_dir': '/test',\n                    'replace': '1',\n                    'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')\n            })\n    response = requests.post(upload_url_base, params = params,\n                             data = m, headers = {'Content-Type': m.content_type})\n    assert response.status_code == 403\n\n    #test upload file to root dir when file already exists and replace is set.\n    params = {'ret-json':'1'}\n    m = MultipartEncoder(\n            
fields={\n                    'parent_dir': '/',\n                    'replace': '1',\n                    'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')\n            })\n    response = requests.post(upload_url_base, params = params,\n                             data = m, headers = {'Content-Type': m.content_type})\n    assert_upload_response(response, True, True)\n\n    time.sleep (1.5)\n    repo_size = api.get_repo_size (repo.id)\n    assert repo_size == 0\n\n    #test upload file to test dir instead of root dir when file already exists and replace is unset.\n    params = {'ret-json':'1'}\n    m = MultipartEncoder(\n            fields={\n                    'parent_dir': '/test',\n                    'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')\n            })\n    response = requests.post(upload_url_base, params = params,\n                             data = m, headers = {'Content-Type': m.content_type})\n    assert response.status_code == 403\n\n    #test upload file to root dir when file already exists and replace is unset.\n    params = {'ret-json':'1'}\n    m = MultipartEncoder(\n            fields={\n                    'parent_dir': '/',\n                    'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')\n            })\n    response = requests.post(upload_url_base, params = params,\n                             data = m, headers = {'Content-Type': m.content_type})\n    assert_upload_response(response, False, True)\n\n    time.sleep (1.5)\n    repo_size = api.get_repo_size (repo.id)\n    assert repo_size == 0\n\n    #test upload the file to subdir whose parent is test.\n    params = {'ret-json':'1'}\n    m = MultipartEncoder(\n            fields={\n                    'parent_dir': '/test',\n                    'relative_path': 'subdir',\n                    'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')\n            })\n    response = 
requests.post(upload_url_base, params = params,\n                             data = m, headers = {'Content-Type': m.content_type})\n    assert response.status_code == 403\n\n    #test upload the file to subdir.\n    params = {'ret-json':'1'}\n    m = MultipartEncoder(\n            fields={\n                    'parent_dir': '/',\n                    'relative_path': 'subdir',\n                    'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')\n            })\n    response = requests.post(upload_url_base, params = params,\n                             data = m, headers = {'Content-Type': m.content_type})\n    assert_upload_response(response, False, False)\n\n    time.sleep (1.5)\n    repo_size = api.get_repo_size (repo.id)\n    assert repo_size == 0\n\n    #test upload the file to subdir whose parent is test when file already exists and replace is set.\n    params = {'ret-json':'1'}\n    m = MultipartEncoder(\n            fields={\n                    'parent_dir': '/test',\n                    'relative_path': 'subdir',\n                    'replace': '1',\n                    'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')\n            })\n    response = requests.post(upload_url_base, params = params,\n                             data = m, headers = {'Content-Type': m.content_type})\n    assert response.status_code == 403\n\n    #test upload the file to subdir when file already exists and replace is set.\n    params = {'ret-json':'1'}\n    m = MultipartEncoder(\n            fields={\n                    'parent_dir': '/',\n                    'relative_path': 'subdir',\n                    'replace': '1',\n                    'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')\n            })\n    response = requests.post(upload_url_base, params = params,\n                             data = m, headers = {'Content-Type': m.content_type})\n    assert_upload_response(response, True, True)\n\n    
time.sleep (1.5)\n    repo_size = api.get_repo_size (repo.id)\n    assert repo_size == 0\n\n    #unset test upload the file to subdir whose parent is test dir when file already exists and replace is unset.\n    params = {'ret-json':'1'}\n    m = MultipartEncoder(\n            fields={\n                    'parent_dir': '/test',\n                    'relative_path': 'subdir',\n                    'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')\n            })\n    response = requests.post(upload_url_base, params = params,\n                             data = m, headers = {'Content-Type': m.content_type})\n    assert response.status_code == 403\n\n    #unset test upload the file to subdir when file already exists and replace is unset.\n    params = {'ret-json':'1'}\n    m = MultipartEncoder(\n            fields={\n                    'parent_dir': '/',\n                    'relative_path': 'subdir',\n                    'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')\n            })\n    response = requests.post(upload_url_base, params = params,\n                             data = m, headers = {'Content-Type': m.content_type})\n    assert_upload_response(response, False, True)\n\n    time.sleep (1.5)\n    repo_size = api.get_repo_size (repo.id)\n    assert repo_size == 0\n\n    #test resumable upload file to test\n    parent_dir = '/test'\n    headers = {'Content-Range':'bytes 0-{}/{}'.format(str(len(chunked_part1_content) - 1),\n                                                      str(total_size)),\n               'Content-Disposition':'attachment; filename=\\\"{}\\\"'.format(resumable_file_name)}\n    response = request_resumable_upload(chunked_part1_path, headers, upload_url_base, parent_dir, False)\n    assert_resumable_upload_response(response, repo.id,\n                                     resumable_test_file_name, False)\n\n    time.sleep (1.5)\n    repo_size = api.get_repo_size (repo.id)\n    assert repo_size == 
0\n\n    headers = {'Content-Range':'bytes {}-{}/{}'.format(str(len(chunked_part1_content)),\n                                                       str(total_size - 1),\n                                                       str(total_size)),\n               'Content-Disposition':'attachment; filename=\\\"{}\\\"'.format(resumable_file_name)}\n    response = request_resumable_upload(chunked_part2_path, headers, upload_url_base, parent_dir, False)\n    assert response.status_code == 403\n\n    #test resumable upload file to root dir\n    parent_dir = '/'\n    headers = {'Content-Range':'bytes 0-{}/{}'.format(str(len(chunked_part1_content) - 1),\n                                                      str(total_size)),\n               'Content-Disposition':'attachment; filename=\\\"{}\\\"'.format(resumable_file_name)}\n    response = request_resumable_upload(chunked_part1_path,headers, upload_url_base,parent_dir, False)\n    assert_resumable_upload_response(response, repo.id,\n                                     resumable_file_name, False)\n\n    repo_size = api.get_repo_size (repo.id)\n    assert repo_size == 0\n\n    headers = {'Content-Range':'bytes {}-{}/{}'.format(str(len(chunked_part1_content)),\n                                                       str(total_size - 1),\n                                                       str(total_size)),\n               'Content-Disposition':'attachment; filename=\\\"{}\\\"'.format(resumable_file_name)}\n    response = request_resumable_upload(chunked_part2_path, headers, upload_url_base, parent_dir, False)\n    assert_resumable_upload_response(response, repo.id,\n                                     resumable_file_name, True)\n\n    time.sleep (1.5)\n    repo_size = api.get_repo_size (repo.id)\n    assert repo_size == total_size\n\n    #test update file.\n    write_file(file_path, file_content)\n    token = api.get_fileserver_access_token(repo.id, obj_id, 'update', USER, False)\n    update_url_base = 
'http://127.0.0.1:8082/update-api/' + token\n    m = MultipartEncoder(\n            fields={\n                    'target_file': '/' + file_name,\n                    'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')\n            })\n    response = requests.post(update_url_base,\n                             data = m, headers = {'Content-Type': m.content_type})\n    assert_update_response(response, False)\n\n    time.sleep (1.5)\n    repo_size = api.get_repo_size (repo.id)\n    assert repo_size == total_size + file_size\n\n    time.sleep(1)\n    del_repo_files(repo.id)\n    del_local_files()\n\ndef test_ajax_mtime(repo):\n    create_test_file()\n    obj_id = '{\"parent_dir\":\"/\"}'\n    mtime = '2023-09-27T18:18:25+08:00'\n\n    token = api.get_fileserver_access_token(repo.id, obj_id, 'upload', USER, False)\n    upload_url_base = 'http://127.0.0.1:8082/upload-aj/'+ token\n    m = MultipartEncoder(\n            fields={\n                    'parent_dir': '/',\n                    'last_modify': mtime,\n                    'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')\n            })\n    response = requests.post(upload_url_base,\n                             data = m, headers = {'Content-Type': m.content_type})\n    assert_upload_response(response, False, False)\n\n    dent = api.get_dirent_by_path(repo.id, '/' + file_name)\n\n    assert dent.mtime == 1695809905\n\ndef test_api_mtime(repo):\n    create_test_file()\n    params = {'ret-json':'1'}\n    obj_id = '{\"parent_dir\":\"/\"}'\n    mtime = '2023-09-27T18:18:25+08:00'\n\n    token = api.get_fileserver_access_token(repo.id, obj_id, 'upload', USER, False)\n    upload_url_base = 'http://127.0.0.1:8082/upload-api/' + token\n    m = MultipartEncoder(\n            fields={\n                    'parent_dir': '/',\n                    'last_modify': mtime,\n                    'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')\n            })\n    
response = requests.post(upload_url_base, params = params,\n                             data = m, headers = {'Content-Type': m.content_type})\n    assert_upload_response(response, False, False)\n\n    dent = api.get_dirent_by_path(repo.id, '/' + file_name)\n\n    assert dent.mtime == 1695809905\n\n"
  },
  {
    "path": "tests/test_file_operation/test_upload_large_files.py",
    "content": "import pytest\nimport requests\nimport os\nimport hashlib\nfrom tests.config import USER\nfrom seaserv import seafile_api as api\nfrom requests_toolbelt import MultipartEncoder\n\nfile_name = 'file.txt'\nfile_name_not_replaced = 'file (1).txt'\nfile_path = os.getcwd() + '/' + file_name\nfile_size = 400*1024*1024\n\ndownload_file_name = 'download_file.txt'\ndownload_file_path = os.getcwd() + '/' + download_file_name\n\nresumable_download_file_name = 'resumable_download_file.txt'\nresumable_download_file_path = os.getcwd() + '/' + resumable_download_file_name\n\nresumable_file_name = 'resumable.txt'\nchunked_part1_name = 'part1.txt'\nchunked_part2_name = 'part2.txt'\nchunked_part1_path = os.getcwd() + '/' + chunked_part1_name\nchunked_part2_path = os.getcwd() + '/' + chunked_part2_name\nchunked_part1_size = 200*1024*1024\nchunked_part2_size = 200*1024*1024\ntotal_size = chunked_part1_size + chunked_part2_size\n\n#File_id is not used when upload files, but\n#the argument obj_id of get_fileserver_access_token shouldn't be NULL.\nfile_id = '0000000000000000000000000000000000000000'\n\ndef create_test_file():\n    fp = open(file_path, 'wb')\n    fp.write(os.urandom(file_size))\n    fp.close()\n    fp = open(chunked_part1_path, 'wb')\n    fp.write(os.urandom(chunked_part1_size))\n    fp.close()\n    fp = open(chunked_part2_path, 'wb')\n    fp.write(os.urandom(chunked_part2_size))\n    fp.close()\n\ndef create_test_dir(repo, dir_name):\n    parent_dir = '/'\n    api.post_dir(repo.id,parent_dir,dir_name,USER)\n\ndef assert_upload_response(response):\n    assert response.status_code == 200\n    response_json = response.json()\n    assert response_json[0]['size'] == file_size\n    assert response_json[0]['id'] != file_id\n    assert response_json[0]['name'] == file_name\n\ndef assert_resumable_upload_response(response, repo_id, file_name, upload_complete):\n    assert response.status_code == 200\n    if not upload_complete:\n        assert response.text == 
'{\"success\": true}'\n        offset = api.get_upload_tmp_file_offset(repo_id, '/' + file_name)\n        assert offset == chunked_part1_size\n    else:\n        response_json = response.json()\n        assert response_json[0]['size'] == total_size\n        new_file_id = response_json[0]['id']\n        assert len(new_file_id) == 40 and new_file_id != file_id\n        assert response_json[0]['name'] == resumable_file_name\n\ndef request_resumable_upload(filepath, headers,upload_url_base,parent_dir,is_ajax):\n    m = MultipartEncoder(\n            fields={\n                    'parent_dir': parent_dir,\n                    'file': (resumable_file_name, open(filepath, 'rb'), 'application/octet-stream')\n            })\n    params = {'ret-json':'1'}\n    headers[\"Content-type\"] = m.content_type\n    if is_ajax:\n        response = requests.post(upload_url_base, headers = headers,\n                             data = m)\n    else:\n        response = requests.post(upload_url_base, headers = headers,\n                             data = m, params = params)\n    return response\n\ndef write_file(file_path, file_content):\n    fp = open(file_path, 'w')\n    fp.write(file_content)\n    fp.close()\n\ndef del_repo_files(repo_id):\n    api.del_file(repo_id, '/', '[\\\"'+file_name+'\\\"]', USER)\n    api.del_file(repo_id, '/', '[\\\"'+file_name_not_replaced+'\\\"]', USER)\n    api.del_file(repo_id, '/', '[\\\"subdir\\\"]', USER)\n    api.del_file(repo_id, '/', '[\\\"'+resumable_file_name+'\\\"]', USER)\n\ndef del_local_files():\n    os.remove(file_path)\n    os.remove(download_file_path)\n    os.remove(chunked_part1_path)\n    os.remove(chunked_part2_path)\n    os.remove(resumable_download_file_path)\n\ndef sha1sum(filepath):\n    with open(filepath, 'rb') as f:\n        return hashlib.sha1(f.read()).hexdigest()\n\ndef chunked_sha1sum(chunked_part1, chunked_part2):\n    f1 = open(chunked_part1, 'rb')\n    f2 = open(chunked_part2, 'rb')\n    data = f1.read()+f2.read()\n    
sha1 = hashlib.sha1(data).hexdigest()\n    f1.close()\n    f2.close()\n    return sha1\n\ndef test_large_files_ajax(repo):\n    create_test_file()\n    create_test_dir(repo,'test')\n    obj_id = '{\"parent_dir\":\"/\"}'\n\n    # upload large file by upload-aj\n    file_id1 = sha1sum(file_path)\n    token = api.get_fileserver_access_token(repo.id, obj_id, 'upload', USER, False)\n    upload_url_base = 'http://127.0.0.1:8082/upload-aj/'+ token\n    m = MultipartEncoder(\n            fields={\n                    'parent_dir': '/',\n                    'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')\n            })\n    response = requests.post(upload_url_base,\n                             data = m, headers = {'Content-Type': m.content_type})\n    assert_upload_response(response)\n\n    # download file and check sha1\n    obj_id = api.get_file_id_by_path(repo.id, '/' + file_name)\n    assert obj_id != None\n    token = api.get_fileserver_access_token (repo.id, obj_id, 'download', USER, False)\n    download_url = 'http://127.0.0.1:8082/files/' + token + '/' + file_name\n    response = requests.get(download_url)\n    assert response.status_code == 200\n    with open(download_file_path, 'wb') as fp:\n       fp.write(response.content)\n\n    file_id2 = sha1sum(download_file_path)\n    assert file_id1 == file_id2\n\n    file_id1 = chunked_sha1sum(chunked_part1_path, chunked_part2_path)\n    parent_dir = '/'\n    headers = {'Content-Range':'bytes 0-{}/{}'.format(str(chunked_part1_size - 1),\n                                                      str(total_size)),\n               'Content-Disposition':'attachment; filename=\\\"{}\\\"'.format(resumable_file_name)}\n    response = request_resumable_upload(chunked_part1_path, headers, upload_url_base, parent_dir, True)\n    assert_resumable_upload_response(response, repo.id,\n                                     resumable_file_name, False)\n\n    headers = {'Content-Range':'bytes 
{}-{}/{}'.format(str(chunked_part1_size),\n                                                       str(total_size - 1),\n                                                       str(total_size)),\n               'Content-Disposition':'attachment; filename=\\\"{}\\\"'.format(resumable_file_name)}\n    response = request_resumable_upload(chunked_part2_path, headers, upload_url_base, parent_dir, True)\n    assert_resumable_upload_response(response, repo.id,\n                                     resumable_file_name, True)\n\n    # download file and check sha1\n    obj_id = api.get_file_id_by_path(repo.id, '/' + resumable_file_name)\n    assert obj_id != None\n    token = api.get_fileserver_access_token (repo.id, obj_id, 'download', USER, False)\n    download_url = 'http://127.0.0.1:8082/files/' + token + '/' + resumable_file_name\n    response = requests.get(download_url)\n    assert response.status_code == 200\n    with open(resumable_download_file_path, 'wb') as fp:\n       fp.write(response.content)\n    file_id2 = sha1sum(resumable_download_file_path)\n    assert file_id1 == file_id2\n\n    del_repo_files(repo.id)\n    del_local_files()\n\ndef test_large_files_api(repo):\n    create_test_file()\n    params = {'ret-json':'1'}\n    obj_id = '{\"parent_dir\":\"/\"}'\n    create_test_dir(repo,'test')\n\n    #test upload file to root dir.\n    file_id1 = sha1sum(file_path)\n    params = {'ret-json':'1'}\n    token = api.get_fileserver_access_token(repo.id, obj_id, 'upload', USER, False)\n    upload_url_base = 'http://127.0.0.1:8082/upload-api/' + token\n    m = MultipartEncoder(\n            fields={\n                    'parent_dir': '/',\n                    'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')\n            })\n    response = requests.post(upload_url_base, params = params,\n                             data = m, headers = {'Content-Type': m.content_type})\n    assert_upload_response(response)\n\n    # download file and check sha1\n    
obj_id = api.get_file_id_by_path(repo.id, '/' + file_name)\n    assert obj_id != None\n    token = api.get_fileserver_access_token (repo.id, obj_id, 'download', USER, False)\n    download_url = 'http://127.0.0.1:8082/files/' + token + '/' + file_name\n    response = requests.get(download_url)\n    assert response.status_code == 200\n    with open(download_file_path, 'wb') as fp:\n       fp.write(response.content)\n\n    file_id2 = sha1sum(download_file_path)\n    assert file_id1 == file_id2\n\n    #test resumable upload file to test\n    file_id1 = chunked_sha1sum(chunked_part1_path, chunked_part2_path)\n    parent_dir = '/'\n    headers = {'Content-Range':'bytes 0-{}/{}'.format(str(chunked_part1_size - 1),\n                                                      str(total_size)),\n               'Content-Disposition':'attachment; filename=\\\"{}\\\"'.format(resumable_file_name)}\n    response = request_resumable_upload(chunked_part1_path, headers, upload_url_base, parent_dir, False)\n    assert_resumable_upload_response(response, repo.id,\n                                     resumable_file_name, False)\n\n    headers = {'Content-Range':'bytes {}-{}/{}'.format(str(chunked_part1_size),\n                                                       str(total_size - 1),\n                                                       str(total_size)),\n               'Content-Disposition':'attachment; filename=\\\"{}\\\"'.format(resumable_file_name)}\n    response = request_resumable_upload(chunked_part2_path, headers, upload_url_base, parent_dir, False)\n    assert_resumable_upload_response(response, repo.id,\n                                     resumable_file_name, True)\n\n    obj_id = api.get_file_id_by_path(repo.id, '/' + resumable_file_name)\n    assert obj_id != None\n    token = api.get_fileserver_access_token (repo.id, obj_id, 'download', USER, False)\n    download_url = 'http://127.0.0.1:8082/files/' + token + '/' + resumable_file_name\n    response = 
requests.get(download_url)\n    assert response.status_code == 200\n    with open(resumable_download_file_path, 'wb') as fp:\n       fp.write(response.content)\n    file_id2 = sha1sum(resumable_download_file_path)\n    assert file_id1 == file_id2\n\n    del_repo_files(repo.id)\n    del_local_files()\n"
  },
  {
    "path": "tests/test_file_operation/test_zip_download.py",
    "content": "import pytest\nimport requests\nimport os\nimport time\nimport zipfile\nimport json\nfrom tests.config import USER\nfrom seaserv import seafile_api as api\n\nfile1_name = 'file1.txt'\nfile2_name = 'file2.txt'\nfile1_path = os.getcwd() + '/' + file1_name\nfile2_path = os.getcwd() + '/' + file2_name\nfile1_content ='File1 content'\nfile2_content ='File2 content'\ndownload_dir_path = os.getcwd() + '/download_dir'\n\ndef create_test_files():\n    os.mkdir(download_dir_path)\n    with open(file1_path, 'w') as fp1:\n        fp1.write(file1_content)\n    with open(file2_path, 'w') as fp2:\n        fp2.write(file2_content)\n\ndef remove_test_files():\n    os.rmdir(download_dir_path)\n    os.remove(file1_path)\n    os.remove(file2_path)\n\ndef test_zip_download():\n    create_test_files()\n    t_repo_id = api.create_repo('test_zip_download', '', USER)\n    base_url = 'http://127.0.0.1:8082/'\n\n    #test zip download dir\n    dir_name = 'dir'\n    api.post_dir(t_repo_id, '/', dir_name, USER)\n    api.post_file(t_repo_id, file1_path, '/dir', file1_name, USER)\n    api.post_file(t_repo_id, file2_path, '/dir', file2_name, USER)\n\n    dir_id = api.get_dir_id_by_path(t_repo_id, '/dir')\n    obj_id = {'obj_id': dir_id, 'dir_name': dir_name, 'is_windows': 0}\n    obj_id_json_str = json.dumps(obj_id)\n    token = api.get_fileserver_access_token(t_repo_id, obj_id_json_str,\n                                            'download-dir', USER)\n\n    time.sleep(1)\n    download_url = base_url + 'zip/' + token\n    response = requests.get(download_url)\n    assert response.status_code == 200\n\n    download_zipfile_path = download_dir_path + '/dir.zip'\n    with open(download_zipfile_path, 'wb') as fp:\n       fp.write(response.content)\n    zipFile = zipfile.ZipFile(download_zipfile_path)\n    for name in zipFile.namelist():\n        zipFile.extract(name, download_dir_path)\n    zipFile.close()\n    assert os.path.exists(download_dir_path + '/dir.zip')\n    assert 
os.path.exists(download_dir_path + '/dir')\n    assert os.path.exists(download_dir_path + '/dir' + '/file1.txt')\n    assert os.path.exists(download_dir_path + '/dir' + '/file2.txt')\n    with open(download_dir_path + '/dir' + '/file1.txt', 'r') as fp1:\n       line = fp1.read()\n    assert line == file1_content\n    with open(download_dir_path + '/dir' + '/file2.txt', 'r') as fp2:\n       line = fp2.read()\n    assert line == file2_content\n\n    os.remove(download_dir_path + '/dir' + '/file1.txt')\n    os.remove(download_dir_path + '/dir' + '/file2.txt')\n    os.rmdir(download_dir_path + '/dir')\n    os.remove(download_dir_path + '/dir.zip')\n\n    #test zip download empty dir\n    empty_dir_name = 'empty_dir'\n    api.post_dir(t_repo_id, '/', empty_dir_name, USER)\n\n    dir_id = api.get_dir_id_by_path(t_repo_id, '/empty_dir')\n    obj_id = {'obj_id': dir_id, 'dir_name': empty_dir_name, 'is_windows': 0}\n    obj_id_json_str = json.dumps(obj_id)\n    token = api.get_fileserver_access_token(t_repo_id, obj_id_json_str,\n                                            'download-dir', USER)\n    time.sleep(1)\n    download_url = base_url + 'zip/' + token\n    response = requests.get(download_url)\n    assert response.status_code == 200\n\n    download_zipfile_path = download_dir_path + '/empty_dir.zip'\n    with open(download_zipfile_path, 'wb') as fp:\n       fp.write(response.content)\n    zipFile = zipfile.ZipFile(download_zipfile_path)\n    for name in zipFile.namelist():\n        zipFile.extract(name, download_dir_path)\n    zipFile.close()\n    assert os.path.exists(download_dir_path + '/empty_dir')\n    assert not os.listdir(download_dir_path + '/empty_dir')\n\n    os.rmdir(download_dir_path + '/empty_dir')\n    os.remove(download_dir_path + '/empty_dir.zip')\n\n    #test zip download mutliple files\n    api.post_file(t_repo_id, file1_path, '/', file1_name, USER)\n    api.post_file(t_repo_id, file2_path, '/', file2_name, USER)\n    obj_id = {'parent_dir': '/', 
'file_list': [file1_name, file2_name], 'is_windows' : 0}\n    obj_id_json_str = json.dumps(obj_id)\n    token = api.get_fileserver_access_token(t_repo_id, obj_id_json_str,\n                                            'download-multi', USER)\n\n    time.sleep(1)\n    download_url = base_url + 'zip/' + token\n    response = requests.get(download_url)\n    assert response.status_code == 200\n\n    download_zipfile_path = download_dir_path + '/multi_files.zip'\n    with open(download_zipfile_path, 'wb') as fp:\n       fp.write(response.content)\n    zipFile = zipfile.ZipFile(download_zipfile_path)\n    for name in zipFile.namelist():\n        zipFile.extract(name, download_dir_path)\n    zipFile.close()\n    assert os.path.exists(download_dir_path + '/file1.txt')\n    assert os.path.exists(download_dir_path + '/file2.txt')\n    with open(download_dir_path + '/file1.txt', 'r') as fp1:\n       line = fp1.read()\n    assert line == file1_content\n    with open(download_dir_path + '/file2.txt', 'r') as fp2:\n       line = fp2.read()\n    assert line == file2_content\n    os.remove(download_dir_path + '/file1.txt')\n    os.remove(download_dir_path + '/file2.txt')\n    os.remove(download_dir_path + '/multi_files.zip')\n\n    #test zip download mutliple files in multi-level\n    api.post_file(t_repo_id, file2_path, '/dir', file2_name, USER)\n    obj_id = {'parent_dir': '/', 'file_list': [file1_name, 'dir/'+file2_name], 'is_windows' : 0}\n    obj_id_json_str = json.dumps(obj_id)\n    token = api.get_fileserver_access_token(t_repo_id, obj_id_json_str,\n                                            'download-multi', USER)\n\n    time.sleep(1)\n    download_url = base_url + 'zip/' + token\n    response = requests.get(download_url)\n    assert response.status_code == 200\n\n    download_zipfile_path = download_dir_path + '/multi_files.zip'\n    with open(download_zipfile_path, 'wb') as fp:\n       fp.write(response.content)\n    zipFile = zipfile.ZipFile(download_zipfile_path)\n    
for name in zipFile.namelist():\n        zipFile.extract(name, download_dir_path)\n    zipFile.close()\n    assert os.path.exists(download_dir_path + '/file1.txt')\n    assert os.path.exists(download_dir_path + '/file2.txt')\n    with open(download_dir_path + '/file1.txt', 'r') as fp1:\n       line = fp1.read()\n    assert line == file1_content\n    with open(download_dir_path + '/file2.txt', 'r') as fp2:\n       line = fp2.read()\n    assert line == file2_content\n    os.remove(download_dir_path + '/file1.txt')\n    os.remove(download_dir_path + '/file2.txt')\n    os.remove(download_dir_path + '/multi_files.zip')\n\n    remove_test_files()\n    api.remove_repo(t_repo_id)\n\n"
  },
  {
    "path": "tests/test_file_property_and_dir_listing/test_file_property_and_dir_listing.py",
    "content": "import pytest\nimport os\nimport time\nfrom tests.config import USER\nfrom seaserv import seafile_api as api\n\nfile_name = 'test.txt'\ndir_name = 'test_dir'\nfile_content = 'test file content'\nfile_path = os.getcwd() + '/' + file_name\n\ndef create_the_file ():\n    fp = open(file_path, 'w')\n    fp.write(file_content)\n    fp.close()\n\ndef test_file_property_and_dir_listing ():\n\n    t_repo_version = 1\n    t_repo_id = api.create_repo('test_file_property_and_dir_listing', '', USER, passwd=None)\n\n    create_the_file()\n\n    api.post_file(t_repo_id, file_path, '/', file_name, USER)\n    api.post_dir(t_repo_id, '/', dir_name, USER)\n    api.post_file(t_repo_id, file_path, '/' + dir_name, file_name, USER)\n\n    #test is_valid_filename\n    t_valid_file_name = 'valid_filename'\n    t_invalid_file_name = '/invalid_filename'\n    assert api.is_valid_filename(t_repo_id, t_valid_file_name)\n    assert api.is_valid_filename(t_repo_id, t_invalid_file_name) == 0\n\n    #test get_file_id_by_path\n    t_file_id = api.get_file_id_by_path(t_repo_id, '/test.txt')\n    assert t_file_id\n\n    #test get_dir_id_by_path\n    t_dir_id = api.get_dir_id_by_path(t_repo_id, '/test_dir')\n    assert t_dir_id\n\n    #test get_file_size\n    t_file_size = len(file_content)\n    assert t_file_size == api.get_file_size(t_repo_id, t_repo_version, t_file_id)\n\n    #test get_dir_size\n    t_dir_size = len(file_content)\n    assert t_dir_size == api.get_dir_size(t_repo_id, t_repo_version, t_dir_id)\n\n    #test get_file_count_info_by_path\n    t_file_count_info = api.get_file_count_info_by_path(t_repo_id , '/')\n    assert t_file_count_info.file_count == 2\n    assert t_file_count_info.dir_count == 1\n    assert t_file_count_info.size == t_file_size + t_dir_size\n\n    #test get_file_id_by_commit_and_path\n    t_file_id_tmp = t_file_id\n    t_repo = api.get_repo(t_repo_id)\n    assert t_repo\n    t_commit_id = t_repo.head_cmmt_id\n    t_file_id = 
api.get_file_id_by_commit_and_path(t_repo_id,\n                                                   t_commit_id,\n                                                   '/test.txt')\n\n    assert t_file_id == t_file_id_tmp\n\n    #test get_dirent_by_path\n    std_file_mode = 0o100000 | 0o644\n    t_dirent_obj = api.get_dirent_by_path(t_repo_id, '/test.txt')\n    assert t_dirent_obj\n    assert t_dirent_obj.obj_id == t_file_id\n    assert t_dirent_obj.obj_name == 'test.txt'\n    assert t_dirent_obj.mode == std_file_mode\n    assert t_dirent_obj.version == t_repo_version\n    assert t_dirent_obj.size == t_file_size\n    assert t_dirent_obj.modifier == USER\n\n    #test list_file_by_file_id\n    t_block_list =  api.list_file_by_file_id(t_repo_id, t_file_id)\n    assert t_block_list\n\n    #test list_blocks_by_file_id\n    t_block_list = api.list_blocks_by_file_id(t_repo_id, t_file_id)\n    assert t_block_list\n\n    #test list_dir_by_dir_id\n    t_dir_list = api.list_dir_by_dir_id(t_repo_id, t_dir_id)\n    assert len(t_dir_list) == 1\n\n    #test list_dir_by_path\n    t_dir_list = api.list_dir_by_path(t_repo_id, '/test_dir')\n    assert len(t_dir_list) == 1\n\n    #test get_dir_id_by_commit_and_path\n    t_dir_id = api.get_dir_id_by_commit_and_path(t_repo_id, t_commit_id, '/test_dir')\n    assert t_dir_id\n\n    #test list_dir_by_commit_and_path\n    t_dir_list = api.list_dir_by_commit_and_path(t_repo_id, t_commit_id, '/test_dir')\n    assert len(t_dir_list) == 1\n\n    #test list_dir_with_perm\n    t_dir_list = api.list_dir_with_perm(t_repo_id, '/test_dir', t_dir_id, USER)\n    assert len(t_dir_list) == 1\n\n    #test mkdir_with_parent\n    api.mkdir_with_parents (t_repo_id, '/test_dir', 'test_subdir', USER)\n    t_dir_id = api.get_dir_id_by_path(t_repo_id, '/test_dir/test_subdir')\n    assert t_dir_id\n\n    #test get_total_storage\n    t_total_size = api.get_total_storage()\n    t_repo_size = api.get_repo_size(t_repo_id)\n    assert t_total_size == t_repo_size\n\n    
#get_total_file_number\n    time.sleep(1)\n    assert api.get_total_file_number() == 2\n\n    api.remove_repo(t_repo_id)\n"
  },
  {
    "path": "tests/test_gc/test_gc.py",
    "content": "import pytest\nimport requests\nimport os\nimport time\nfrom subprocess import run\nfrom tests.config import USER, USER2\nfrom seaserv import seafile_api as api\nfrom concurrent.futures import ThreadPoolExecutor\nfrom requests_toolbelt import MultipartEncoder\n\nfile_name = 'file.txt'\nfirst_name = 'first.txt'\nfirst_path = os.getcwd() + '/' + first_name\nfirst_content = 'Fist file content.\\r\\n'\n\nsecond_name = 'second.txt'\nsecond_content = 'Second file content.\\r\\n'\nsecond_path = os.getcwd() + '/' + second_name\n\nthird_name = 'third.txt'\nthird_path = os.getcwd() + '/' + third_name\nthird_content = 'Third file content.\\r\\n'\n\ndef create_test_file():\n    fp = open(first_path, 'w')\n    fp.write(first_content)\n    fp.close()\n    fp = open(second_path, 'w')\n    fp.write(second_content)\n    fp.close()\n    fp = open(third_path, 'w')\n    fp.write(third_content)\n    fp.close()\n\ndef del_local_files():\n    os.remove(first_path)\n    os.remove(second_path)\n    os.remove(third_path)\n\ndef create_test_dir(repo, dir_name):\n    parent_dir = '/'\n    api.post_dir(repo.id,parent_dir,dir_name,USER)\n\ndef run_gc(repo_id, rm_fs, check):\n    cmdStr = 'seafserv-gc --verbose -F /tmp/seafile-tests/conf -d /tmp/seafile-tests/seafile-data %s %s %s'%(rm_fs, check, repo_id)\n    cmd=cmdStr.split(' ')\n    ret = run (cmd)\n    assert ret.returncode == 0\n\n@pytest.mark.parametrize('rm_fs', ['', '--rm-fs'])\ndef test_gc_full_history(repo, rm_fs):\n    create_test_file()\n\n    api.set_repo_valid_since (repo.id, -1)\n\n    create_test_dir(repo,'subdir')\n    v_repo_id = api.share_subdir_to_user(repo.id, '/subdir', USER, USER2, 'rw')\n    assert v_repo_id is not None\n    assert api.post_file(repo.id, first_path, '/subdir', file_name, USER) == 0\n\n    assert api.post_empty_file(repo.id, '/', file_name, USER) == 0\n    t_repo = api.get_repo(repo.id)\n    assert api.put_file(repo.id, first_path, '/', file_name, USER, t_repo.head_cmmt_id)\n    t_repo = 
api.get_repo(repo.id)\n    assert api.put_file(repo.id, second_path, '/', file_name, USER, t_repo.head_cmmt_id)\n    t_repo = api.get_repo(repo.id)\n    assert api.put_file(repo.id, third_path, '/', file_name, USER, t_repo.head_cmmt_id)\n    time.sleep(1)\n\n    api.del_file(repo.id, '/', '[\\\"'+file_name+'\\\"]', USER)\n\n    run_gc(repo.id, rm_fs, '')\n    run_gc(repo.id, '', '--check')\n\n    del_local_files()\n\n@pytest.mark.parametrize('rm_fs', ['', '--rm-fs'])\ndef test_gc_no_history(repo, rm_fs):\n    create_test_file()\n\n    api.set_repo_valid_since (repo.id, 0)\n\n    create_test_dir(repo,'subdir')\n    v_repo_id = api.share_subdir_to_user(repo.id, '/subdir', USER, USER2, 'rw')\n    assert v_repo_id is not None\n    assert api.post_file(repo.id, first_path, '/subdir', file_name, USER) == 0\n\n    assert api.post_empty_file(repo.id, '/', file_name, USER) == 0\n    t_repo = api.get_repo(repo.id)\n    assert api.put_file(repo.id, first_path, '/', file_name, USER, t_repo.head_cmmt_id)\n    t_repo = api.get_repo(repo.id)\n    assert api.put_file(repo.id, second_path, '/', file_name, USER, t_repo.head_cmmt_id)\n    t_repo = api.get_repo(repo.id)\n    time.sleep(1)\n    assert api.put_file(repo.id, third_path, '/', file_name, USER, t_repo.head_cmmt_id)\n\n    time.sleep(1)\n    api.del_file(repo.id, '/', '[\\\"'+file_name+'\\\"]', USER)\n\n    run_gc(repo.id, rm_fs, '')\n    api.set_repo_valid_since (repo.id, 0)\n    run_gc(repo.id, '', '--check')\n    \n    del_local_files()\n\n@pytest.mark.parametrize('rm_fs', ['', '--rm-fs'])\ndef test_gc_partial_history(repo, rm_fs):\n    create_test_file()\n\n    create_test_dir(repo,'subdir')\n    v_repo_id = api.share_subdir_to_user(repo.id, '/subdir', USER, USER2, 'rw')\n    assert v_repo_id is not None\n    assert api.post_file(repo.id, first_path, '/subdir', file_name, USER) == 0\n\n    assert api.post_empty_file(repo.id, '/', file_name, USER) == 0\n    t_repo = api.get_repo(repo.id)\n    time.sleep(1)\n    assert 
api.put_file(repo.id, first_path, '/', file_name, USER, t_repo.head_cmmt_id)\n    t_repo = api.get_repo(repo.id)\n    time.sleep(1)\n    assert api.put_file(repo.id, second_path, '/', file_name, USER, t_repo.head_cmmt_id)\n\n    t_repo = api.get_repo(repo.id)\n    t_commit = api.get_commit(t_repo.id, t_repo.version, t_repo.head_cmmt_id)\n    api.set_repo_valid_since (repo.id, t_commit.ctime)\n\n    time.sleep(1)\n    assert api.put_file(repo.id, third_path, '/', file_name, USER, t_repo.head_cmmt_id)\n\n    api.del_file(repo.id, '/', '[\\\"'+file_name+'\\\"]', USER)\n\n    run_gc(repo.id, rm_fs, '')\n    run_gc(repo.id, '', '--check')\n\n    del_local_files()\n"
  },
  {
    "path": "tests/test_get_repo_list/test_get_repo_list.py",
    "content": "import pytest\n\nfrom seaserv import seafile_api as api\nfrom tests.config import USER\nfrom tests.utils import randstring\n\nattr_to_assert = ['id', 'name', 'version', 'last_modify', 'size',\n                  'last_modifier', 'head_cmmt_id', 'repo_id', 'repo_name',\n                  'last_modified', 'encrypted', 'is_virtual', 'origin_repo_id',\n                  'origin_repo_name', 'origin_path', 'store_id' ,'share_type',\n                  'permission', 'user', 'group_id']\n\ndef assert_by_attr_name (repo, repo_to_test, attr):\n    if (attr == 'id'):\n        assert getattr(repo_to_test, attr) == repo.id\n    elif (attr == 'name'):\n        assert getattr(repo_to_test, attr) == repo.name\n    elif (attr == 'size'):\n        assert getattr(repo_to_test, attr) == repo.size\n    elif (attr == 'last_modifier'):\n        assert getattr(repo_to_test, attr) == repo.last_modifier\n    elif (attr == 'head_cmmt_id'):\n        assert getattr(repo_to_test, attr) == repo.head_cmmt_id\n    elif (attr == 'repo_id'):\n        assert getattr(repo_to_test, attr) == repo.id\n    elif (attr == 'repo_name'):\n        assert getattr(repo_to_test, attr) == repo.name\n    elif (attr == 'last_modified'):\n        assert getattr(repo_to_test, attr) == repo.last_modified\n    elif (attr == 'encrypted'):\n        assert getattr(repo_to_test, attr) == repo.encrypted\n    elif (attr == 'is_virtual'):\n        assert getattr(repo_to_test, attr) == repo.is_virtual\n    elif (attr == 'origin_repo_id'):\n        assert getattr(repo_to_test, attr) == repo.origin_repo_id\n    elif (attr == 'origin_repo_name'):\n        assert getattr(repo_to_test, attr) != None\n    elif (attr == 'origin_path'):\n        assert getattr(repo_to_test, attr) == repo.origin_path\n    elif (attr == 'store_id'):\n        assert getattr(repo_to_test, attr) == repo.store_id\n    elif (attr == 'share_type'):\n        assert getattr(repo_to_test, attr) != None\n    elif (attr == 'permission'):\n        
assert getattr(repo_to_test, attr) == 'rw'\n    elif (attr == 'group_id'):\n        assert getattr(repo_to_test,attr) != 0\n\ndef assert_public_repos_attr(repo, repo_to_test):\n    for attr in attr_to_assert:\n       assert hasattr(repo_to_test, attr) == True\n\n       assert hasattr(repo_to_test, 'is_virtual')\n       is_virtual = getattr(repo_to_test, 'is_virtual')\n\n       if (is_virtual == False):\n           if (attr == 'origin_repo_id' or\n               attr == 'origin_path'):\n               continue\n\n       if (attr == 'origin_repo_name'):\n           continue\n\n       if (attr == 'group_id'):\n           continue\n\n       assert_by_attr_name(repo, repo_to_test, attr)\n\ndef assert_group_repos_attr(repo, repo_to_test):\n    for attr in attr_to_assert:\n        assert hasattr(repo_to_test, attr) == True\n\n        assert hasattr(repo_to_test, 'is_virtual')\n        is_virtual = getattr(repo_to_test, 'is_virtual')\n\n        if (is_virtual == False):\n            if (attr == 'origin_repo_id' or\n                attr == 'origin_repo_name' or\n                attr == 'origin_path'):\n                continue\n\n        assert_by_attr_name(repo, repo_to_test, attr)\n\ndef test_get_group_repos(repo, group):\n    repo = api.get_repo(repo.id)\n    api.group_share_repo(repo.id, group.id, USER, 'rw')\n    repos = api.get_repos_by_group(group.id)\n    assert_group_repos_attr(repo, repos[0])\n\n    repos = api.get_group_repos_by_owner(USER)\n    assert_group_repos_attr(repo, repos[0])\n\n    v_repo_id = api.share_subdir_to_group(repo.id, '/dir1', USER, group.id, 'rw')\n    v_repo = api.get_repo(v_repo_id)\n    v_repo_to_test = api.get_group_shared_repo_by_path(repo.id, '/dir1', group.id)\n    assert_group_repos_attr(v_repo, v_repo_to_test)\n    api.unshare_subdir_for_group(repo.id, '/dir1', USER, group.id)\n\n    repos = api.get_group_repos_by_user(USER)\n    assert_group_repos_attr(repo, repos[0])\n\n    assert api.group_unshare_repo(repo.id, group.id, USER) == 
0\n\ndef test_get_inner_pub_repos(repo):\n    repo = api.get_repo(repo.id)\n    api.add_inner_pub_repo(repo.id, 'rw')\n    repos = api.get_inner_pub_repo_list()\n    assert_public_repos_attr(repo, repos[0])\n\n    repos = api.list_inner_pub_repos_by_owner(USER)\n    assert_public_repos_attr(repo, repos[0])\n\n    assert api.remove_inner_pub_repo(repo.id) == 0\n"
  },
  {
    "path": "tests/test_group/test_groups.py",
    "content": "import pytest\nfrom seaserv import seafile_api as api\nfrom seaserv import ccnet_api\n\nfrom tests.config import USER, USER2\nfrom tests.utils import randstring\n\ndef test_multi_tier_groups(repo):\n    id1 = ccnet_api.create_group('group1', USER, parent_group_id=-1)\n    id2 = ccnet_api.create_group('group2', USER2, parent_group_id = id1)\n    id3 = ccnet_api.create_group('group3', USER, parent_group_id = id1)\n    id4 = ccnet_api.create_group('group4', USER2, parent_group_id = id3)\n    id5 = ccnet_api.create_group('group5', USER2, parent_group_id = 0)\n    assert id1 != -1 and id2 != -1 and id3 != -1 and id4 != -1\n\n    group1 = ccnet_api.get_group(id1)\n    group2 = ccnet_api.get_group(id2)\n    group3 = ccnet_api.get_group(id3)\n    group4 = ccnet_api.get_group(id4)\n    assert group1.parent_group_id == -1\n    assert group2.parent_group_id == id1\n    assert group3.parent_group_id == id1\n    assert group4.parent_group_id == id3\n\n    members = ccnet_api.search_group_members (id1, 'randgroup{}'.format(randstring(6)))\n    assert len(members) == 0\n    members = ccnet_api.search_group_members (id1, USER)\n    assert len(members) == 1\n    assert members[0].user_name == USER\n\n    ances_order = [id5, id4, id3, id2, id1]\n    user2_groups_with_ancestors = ccnet_api.get_groups (USER2, return_ancestors = True)\n    assert len(user2_groups_with_ancestors) == 5\n    i = 0\n    for g in user2_groups_with_ancestors:\n        assert g.id == ances_order[i]\n        i = i + 1\n\n    order = [id5, id4, id2]\n    i = 0\n    user2_groups = ccnet_api.get_groups (USER2)\n    assert len(user2_groups) == 3\n    for g in user2_groups:\n        assert g.id == order[i]\n        i = i + 1\n\n    top_groups = ccnet_api.get_top_groups(True)\n    assert len(top_groups) == 1\n    for g in top_groups:\n        assert g.parent_group_id == -1\n\n    child_order = [id2, id3]\n    i = 0\n    id1_children = ccnet_api.get_child_groups(id1)\n    assert len(id1_children) == 
2\n    for g in id1_children:\n        assert g.id == child_order[i]\n        i = i + 1\n\n    group4_order = [id1, id3, id4]\n    i = 0\n    group4_ancestors = ccnet_api.get_ancestor_groups(id4)\n    assert len(group4_ancestors) == 3\n    for g in group4_ancestors:\n        assert g.id == group4_order[i]\n        i = i + 1\n\n    rm5 = ccnet_api.remove_group(id5)\n    rm4 = ccnet_api.remove_group(id4)\n    rm3 = ccnet_api.remove_group(id3)\n    rm2 = ccnet_api.remove_group(id2)\n    rm1 = ccnet_api.remove_group(id1)\n    assert rm5 == 0 and rm4 == 0 and rm3 == 0 and rm2 == 0 and rm1 == 0\n"
  },
  {
    "path": "tests/test_password/test_password.py",
    "content": "import pytest\nfrom tests.config import USER\nfrom seaserv import seafile_api as api\n\n@pytest.mark.parametrize('rpc, enc_version',\n                         [('create_repo', 2), ('create_repo', 3), ('create_repo', 4),\n                          ('create_enc_repo', 2), ('create_enc_repo', 3), ('create_enc_repo', 4)])\ndef test_encrypted_repo(rpc, enc_version):\n    test_repo_name = 'test_enc_repo'\n    test_repo_desc = 'test_enc_repo'\n    test_repo_passwd = 'test_enc_repo'\n    if rpc == 'create_repo':\n        repo_id = api.create_repo(test_repo_name, test_repo_desc, USER,\n                                  test_repo_passwd, enc_version)\n        assert repo_id\n    else:\n        if enc_version == 2:\n            repo_id = 'd17bf8ca-3019-40ee-8fdb-0258c89fb762'\n        elif enc_version == 3:\n            repo_id = 'd17bf8ca-3019-40ee-8fdb-0258c89fb763'\n        else:\n            repo_id = 'd17bf8ca-3019-40ee-8fdb-0258c89fb764'\n        enc_info = api.generate_magic_and_random_key(enc_version, repo_id, test_repo_passwd)\n        assert enc_info\n        ret_repo_id = api.create_enc_repo(repo_id, test_repo_name, test_repo_desc,\n                                          USER, enc_info.magic, enc_info.random_key,\n                                          enc_info.salt, enc_version)\n        assert ret_repo_id == repo_id\n\n    repo = api.get_repo(repo_id)\n    assert repo\n    assert repo.enc_version == enc_version\n    assert len(repo.magic) == 64\n    assert len(repo.random_key) == 96\n    if enc_version == 3 or enc_version == 4:\n        assert len(repo.salt) == 64\n        \n    new_passwd = 'new password'\n\n    assert api.set_passwd(repo.id, USER, test_repo_passwd) == 0\n    assert api.get_decrypt_key(repo.id, USER)\n    assert api.change_repo_passwd(repo.repo_id, test_repo_passwd, new_passwd, USER) == 0\n    assert api.set_passwd(repo.id, USER, new_passwd) == 0\n\n    assert api.is_password_set(repo.id, USER)\n    assert 
api.unset_passwd(repo.id, USER) == 0\n    assert api.is_password_set(repo.id, USER) == 0\n\n    api.remove_repo(repo_id)\n\n@pytest.mark.parametrize('rpc, enc_version, algo, params',\n                         [('create_repo', 2, 'pbkdf2_sha256', '1000'), ('create_repo', 3, 'pbkdf2_sha256', '1000'), ('create_repo', 4, 'pbkdf2_sha256', '1000'),\n                         ('create_repo', 2, 'argon2id', '2,102400,8'), ('create_repo', 3, 'argon2id', '2,102400,8'), ('create_repo', 4, 'argon2id', '2,102400,8')])\ndef test_pwd_hash(rpc, enc_version, algo, params):\n    test_repo_name = 'test_enc_repo'\n    test_repo_desc = 'test_enc_repo'\n    test_repo_passwd = 'test_enc_repo'\n    repo_id = api.create_repo(test_repo_name, test_repo_desc, USER,\n                              test_repo_passwd, enc_version, pwd_hash_algo=algo, pwd_hash_params=params)\n    assert repo_id\n\n    repo = api.get_repo(repo_id)\n    assert repo\n    assert repo.enc_version == enc_version\n    assert len(repo.pwd_hash) == 64\n    assert len(repo.random_key) == 96\n    if enc_version > 2:\n        assert len(repo.salt) == 64\n        \n    new_passwd = 'new password'\n\n    assert api.set_passwd(repo.id, USER, test_repo_passwd) == 0\n    assert api.get_decrypt_key(repo.id, USER)\n    assert api.change_repo_passwd(repo.repo_id, test_repo_passwd, new_passwd, USER) == 0\n    assert api.set_passwd(repo.id, USER, new_passwd) == 0\n\n    assert api.is_password_set(repo.id, USER)\n    assert api.unset_passwd(repo.id, USER) == 0\n    assert api.is_password_set(repo.id, USER) == 0\n\n    api.remove_repo(repo_id)\n\n@pytest.mark.parametrize('enc_version, algo, params',\n                         [(2, 'pbkdf2_sha256', '1000'), (3, 'pbkdf2_sha256', '1000'), ( 4, 'pbkdf2_sha256', '1000'),\n                         (2, 'argon2id', '2,102400,8'), (3, 'argon2id', '2,102400,8'), (4, 'argon2id', '2,102400,8')])\ndef test_upgrade_pwd_hash(enc_version, algo, params):\n    test_repo_name = 'test_enc_repo'\n    test_repo_desc = 
'test_enc_repo'\n    test_repo_passwd = 'test_enc_repo'\n    repo_id = api.create_repo(test_repo_name, test_repo_desc, USER,\n                              test_repo_passwd, enc_version)\n    assert repo_id\n\n    repo = api.get_repo(repo_id)\n    assert repo\n    assert repo.enc_version == enc_version\n    assert len(repo.random_key) == 96\n    if enc_version > 2:\n        assert len(repo.salt) == 64\n\n    assert api.upgrade_repo_pwd_hash_algorithm (repo.repo_id, USER, test_repo_passwd, algo, params) == 0\n\n    repo = api.get_repo(repo_id)\n    assert repo.pwd_hash_algo == algo;\n    assert repo.pwd_hash_params == params;\n    assert repo.pwd_hash\n\n    assert api.set_passwd(repo.id, USER, test_repo_passwd) == 0\n    assert api.get_decrypt_key(repo.id, USER)\n    assert api.is_password_set(repo.id, USER)\n    assert api.unset_passwd(repo.id, USER) == 0\n    assert api.is_password_set(repo.id, USER) == 0\n\n    api.remove_repo(repo_id)\n"
  },
  {
    "path": "tests/test_repo_manipulation/test_repo_manipulation.py",
    "content": "import pytest\nfrom tests.config import USER, USER2\nfrom seaserv import seafile_api as api\n\ndef get_repo_list_order_by(t_start, t_limit, order_by):\n    t_repo_list = api.get_repo_list(t_start, t_limit, order_by)\n    assert t_repo_list and len(t_repo_list)\n    if order_by == \"size\":\n        assert t_repo_list[0].size >= t_repo_list[1].size\n    if order_by == \"file_count\":\n        assert t_repo_list[0].file_count >= t_repo_list[1].file_count\n\ndef test_repo_manipulation():\n\n    #test get_system_default_repo_id\n    t_default_repo_id = api.get_system_default_repo_id()\n    assert t_default_repo_id\n\n    #test create_repo\n    t_repo_id = api.create_repo('test_repo_manipulation', '', USER, passwd=None)\n    assert t_repo_id\n\n    #test counts_repo\n    t_repo_count = 0\n    t_repo_count = api.count_repos()\n    assert t_repo_count != 0\n\n    #test get_repo ,edit_repo\n    t_new_name = 'n_name'\n    t_new_desc = 'n_desc'\n    t_repo_version = 1\n    t_repo = api.get_repo(t_repo_id)\n    assert t_repo\n\n    api.edit_repo(t_repo_id, t_new_name, t_new_desc, USER)\n    t_repo = api.get_repo(t_repo_id)\n    assert t_repo.name == t_new_name and t_repo.desc == t_new_desc\n\n    #test revert_repo and get_commit\n    t_commit_id_before_changing = t_repo.head_cmmt_id\n\n    api.post_dir(t_repo_id, '/', 'dir1', USER)\n    t_repo = api.get_repo(t_repo_id)\n\n    api.revert_repo(t_repo_id, t_commit_id_before_changing, USER)\n\n    t_repo = api.get_repo(t_repo_id)\n    t_commit_id_after_revert = t_repo.head_cmmt_id\n\n    t_commit_before_changing = api.get_commit(t_repo_id, t_repo_version, t_commit_id_before_changing)\n    t_commit_after_revert = api.get_commit(t_repo_id, t_repo_version, t_commit_id_after_revert)\n    assert t_commit_before_changing.root_id == t_commit_after_revert.root_id\n\n    #test is_repo_owner\n    assert api.is_repo_owner(USER, t_repo_id)\n    assert api.is_repo_owner(USER2, t_repo_id) == 0\n\n    #test get_repo_owner\n    
owner_get = api.get_repo_owner(t_repo_id)\n    assert owner_get == USER\n\n    #test set_repo_owner\n    api.set_repo_owner(t_repo_id, USER2)\n    assert api.is_repo_owner(USER2, t_repo_id)\n\n    #test create_enc_repo\n    t_enc_repo_id = '826d1b7b-f110-46f2-8d5e-7b5ac3e11f4d'\n    t_enc_version = 2\n    t_passwd = '123'\n    magic_and_random_key = api.generate_magic_and_random_key (t_enc_version, t_enc_repo_id, t_passwd)\n    t_magic = magic_and_random_key.magic\n    t_random_key = magic_and_random_key.random_key\n    t_enc_repo_id = api.create_enc_repo (t_enc_repo_id, 'test_encrypted_repo', '', USER, t_magic, t_random_key, None, t_enc_version)\n    assert t_enc_repo_id == '826d1b7b-f110-46f2-8d5e-7b5ac3e11f4d'\n\n    #test get_repos_by_id_prefix\n    t_id_prefix = '826d1b7b'\n    t_repo_list = api.get_repos_by_id_prefix(t_id_prefix, -1, -1)\n    assert t_repo_list[0].id == '826d1b7b-f110-46f2-8d5e-7b5ac3e11f4d'\n\n    #test get_repo_list\n    #test order by None\n    order_by = None\n    get_repo_list_order_by(-1 ,-1, order_by)\n\n    #test order by size\n    order_by = \"size\"\n    get_repo_list_order_by(-1 ,-1, order_by)\n\n    #test order by file_count\n    order_by = \"file_count\"\n    get_repo_list_order_by(-1 ,-1, order_by)\n\n    t_start = 1;\n    t_limit = 1;\n    t_repo_list = api.get_repo_list(t_start, t_limit, None)\n    assert t_repo_list and len(t_repo_list) == 1\n\n    #test get_owned_repo_list\n    t_repo_list = api.get_owned_repo_list(USER2)\n    assert t_repo_list and len(t_repo_list)\n\n    #test get_commit_list\n    t_offset = 0;\n    t_limit = 0;\n    t_commit_list = api.get_commit_list(t_repo_id, t_offset, t_limit)\n    assert t_commit_list and len(t_commit_list) == 4\n\n    t_offset = 1;\n    t_limit = 1;\n    t_commit_list = api.get_commit_list(t_repo_id, t_offset, t_limit)\n    assert t_commit_list and len(t_commit_list) == 1\n\n    #test search_repos_by_name\n    t_repo_list = api.search_repos_by_name (t_repo.name)\n    assert len 
(t_repo_list) == 1 and t_repo_list[0].id == t_repo_id\n    t_repo_list = api.search_repos_by_name (t_repo.name.upper())\n    assert len (t_repo_list) == 1 and t_repo_list[0].id == t_repo_id\n    t_repo_list = api.search_repos_by_name (t_repo.name.lower())\n    assert len (t_repo_list) == 1 and t_repo_list[0].id == t_repo_id\n\n    #test remove_repo\n    api.remove_repo(t_repo_id)\n    t_repo = api.get_repo(t_repo_id)\n    assert t_repo == None\n"
  },
  {
    "path": "tests/test_server_config/test_server_config.py",
    "content": "import pytest\nfrom seaserv import seafile_api as api\n\ndef test_server_config():\n\n    #test_set_server_config_int and get_server_config_int\n    t_group = 't_group'\n    t_key = 't_key'\n    t_value = 1\n    api.set_server_config_int(t_group, t_key, t_value)\n    t_ret = api.get_server_config_int(t_group, t_key)\n    assert t_ret == t_value\n\n    #test_set_server_config_int64 and get_server_config_int64\n    t_group = 't_group'\n    t_key = 't_key'\n    t_value = 9223372036854775807\n    api.set_server_config_int64(t_group, t_key, t_value)\n    t_ret = api.get_server_config_int64(t_group, t_key)\n    assert t_ret == t_value\n\n    #test_set_server_config_string and get_server_config_string\n    t_group = 't_group'\n    t_key = 't_key'\n    t_value = 't_value'\n    api.set_server_config_string(t_group, t_key, t_value)\n    t_ret = api.get_server_config_string(t_group, t_key)\n    assert t_ret == t_value\n\n    #test_set_server_config_boolean and get_server_config_boolean\n    t_group = 't_group'\n    t_key = 't_key'\n    t_value = True\n    api.set_server_config_boolean(t_group, t_key, t_value)\n    t_ret = api.get_server_config_boolean(t_group, t_key)\n    assert t_ret == t_value\n\n    t_value = False\n    api.set_server_config_boolean(t_group, t_key, t_value)\n    t_ret = api.get_server_config_boolean(t_group, t_key)\n    assert t_ret == t_value\n"
  },
  {
    "path": "tests/test_share_and_perm/test_shared_repo_perm.py",
    "content": "import pytest\nimport time\nfrom seaserv import seafile_api as api\nfrom seaserv import ccnet_api\n\nfrom tests.config import ADMIN_USER, USER, USER2\nfrom tests.utils import assert_repo_with_permission\n\n\n@pytest.mark.parametrize('permission', ['r', 'rw'])\ndef test_share_repo_to_user(repo, permission):\n    assert api.check_permission(repo.id, USER) == 'rw'\n    assert api.check_permission(repo.id, USER2) is None\n\n    assert api.repo_has_been_shared(repo.id) == False\n\n    api.share_repo(repo.id, USER, USER2, permission)\n    assert api.check_permission(repo.id, USER2) == permission\n\n    assert api.repo_has_been_shared(repo.id)\n\n    repos = api.get_share_in_repo_list(USER2, 0, 1)\n    assert_repo_with_permission(repo, repos, permission)\n\n    repos = api.get_share_out_repo_list(USER, 0, 1)\n    assert_repo_with_permission(repo, repos, permission)\n\n    users = api.list_repo_shared_to(USER, repo.id)\n    assert len (users) == 1\n    assert users[0].repo_id == repo.id\n    assert users[0].user == USER2\n    assert users[0].perm == permission\n\n    api.remove_share(repo.id, USER, USER2)\n    assert api.check_permission(repo.id, USER2) is None\n\n\n@pytest.mark.parametrize('permission', ['r', 'rw'])\ndef test_share_repo_to_group(repo, group, permission):\n    assert api.check_permission(repo.id, USER) == 'rw'\n    assert api.check_permission(repo.id, USER2) is None\n\n    repos = api.get_repos_by_group(group.id)\n    assert len(repos) == 0\n\n    group_list = ccnet_api.get_groups(USER)\n    assert len(group_list) == 1\n    group_list = ccnet_api.get_groups(USER2)\n    assert len(group_list) == 0\n\n    api.group_share_repo(repo.id, group.id, USER, permission)\n    repos = api.get_repos_by_group(group.id)\n    assert_repo_with_permission(repo, repos, permission)\n\n    group_ids = api.get_shared_group_ids_by_repo(repo.id)\n    assert group_ids[0] == str(group.id)\n\n    group_list = api.list_repo_shared_group_by_user(USER, repo.id)\n    
assert len(group_list) == 1\n    group_list = api.list_repo_shared_group_by_user(USER2, repo.id)\n    assert len(group_list) == 0\n\n    repo_get = api.get_group_shared_repo_by_path (repo.id, None, group.id)\n    assert repo_get and repo_get.repo_id == repo.id\n\n    ccnet_api.group_add_member(group.id, USER, USER2)\n    group_list = ccnet_api.get_groups(USER2)\n    assert len(group_list) == 1\n    group = group_list[0]\n    assert group.id == group.id\n\n    repos2 = api.get_repos_by_group(group.id)\n    assert_repo_with_permission(repo, repos2, permission)\n\n    assert api.check_permission(repo.id, USER2) == permission\n\n    repos = api.get_group_repos_by_user (USER)\n    assert len(repos) == 1\n\n    repoids = api.get_group_repoids(group.id)\n    assert len(repoids) == 1\n\n    repos = api.get_group_repos_by_owner(USER)\n    assert len(repos) == 1\n    api.remove_group_repos_by_owner(group.id, USER)\n    repos = api.get_group_repos_by_owner(USER)\n    assert len(repos) == 0\n\n    api.set_group_repo(repo.id, group.id, USER, permission)\n    repos = api.get_repos_by_group(group.id)\n    assert len(repos) == 1\n    api.remove_group_repos(group.id)\n    repos = api.get_repos_by_group(group.id)\n    assert len(repos) == 0\n\n    api.group_unshare_repo(repo.id, group.id, USER)\n    repos = api.get_repos_by_group(group.id)\n    assert len(repos) == 0\n\n    assert api.check_permission(repo.id, USER2) is None\n\n@pytest.mark.parametrize('permission', ['r', 'rw'])\ndef test_share_dir_to_user(repo, permission):\n    v_repo_id_1 =  api.share_subdir_to_user(repo.id, '/dir1', USER, USER2, permission)\n    v_repo_id_2 =  api.share_subdir_to_user(repo.id, '/dir2', USER, USER2, permission)\n    assert api.check_permission(v_repo_id_1, USER2) == permission\n    assert api.check_permission(v_repo_id_2, USER2) == permission\n\n    vir_repo_2 = api.get_shared_repo_by_path(repo.id, '/dir2', USER2)\n    assert vir_repo_2.permission == permission\n\n    users = 
api.get_shared_users_for_subdir(repo.id, '/dir1', USER)\n    assert len(users) == 1 and users[0].user == USER2\n\n    assert api.del_file(repo.id, '/', '[\\\"dir1\\\"]', USER) == 0\n    assert api.unshare_subdir_for_user(repo.id, '/dir2', USER, USER2) == 0\n\n    time.sleep(2.5)\n\n    assert api.get_shared_repo_by_path(repo.id, '/dir1', USER2) is None\n    assert api.get_shared_repo_by_path(repo.id, '/dir2', USER2) is None\n\n@pytest.mark.parametrize('permission', ['r', 'rw'])\ndef test_share_dir_to_group(repo, group, permission):\n    assert ccnet_api.group_add_member(group.id, USER, USER2) == 0\n    v_repo_id_1 = api.share_subdir_to_group(repo.id, '/dir1', USER, group.id, permission)\n    v_repo_id_2 = api.share_subdir_to_group(repo.id, '/dir2', USER, group.id, permission)\n\n    assert api.check_permission(v_repo_id_1, USER2) == permission\n    assert api.check_permission(v_repo_id_2, USER2) == permission\n\n    repo_get = api.get_group_shared_repo_by_path (repo.id, '/dir1', group.id)\n    assert repo_get and repo_get.repo_id == v_repo_id_1\n\n    users = api.get_shared_groups_for_subdir(repo.id, '/dir1', USER)\n    assert len(users) == 1\n\n    assert api.del_file(repo.id, '/', '[\\\"dir1\\\"]', USER) == 0\n    assert api.unshare_subdir_for_group(repo.id, '/dir2', USER, group.id) == 0\n\n    time.sleep(2.5)\n\n    assert api.check_permission(v_repo_id_1, USER2) is None\n    assert api.check_permission(v_repo_id_2, USER2) is None\n\n@pytest.mark.parametrize('permission_to_share, permission_to_set', [('r', 'rw'), ('rw', 'r')])\ndef test_set_share_permission(repo,  permission_to_share, permission_to_set):\n    assert api.check_permission(repo.id, USER2) == None\n\n    api.share_repo(repo.id, USER, USER2, permission_to_share)\n    assert api.check_permission(repo.id, USER2) == permission_to_share\n\n    api.set_share_permission(repo.id, USER, USER2, permission_to_set)\n    assert api.check_permission(repo.id, USER2) == permission_to_set\n\n    
api.remove_share(repo.id, USER, USER2)\n\n@pytest.mark.parametrize('permission_to_share, permission_to_set', [('r', 'rw'), ('rw', 'r')])\ndef set_group_repo_permission(repo,  group, permission_to_share, permission_to_set):\n    ccnet_api.group_add_member(group.id, USER, USER2)\n    assert api.check_permission(repo.id, USER2) == None\n\n    api.set_group_repo(repo.id, group.id, USER, permission_to_share)\n    assert api.check_permission(repo.id, USER2) == permission_to_share\n\n    api.set_group_repo_permission(group.id, repo.id, permission_to_set)\n    assert api.check_permission(repo.id, USER2) == permission_to_set\n\n    api.group_unshare_repo(repo.id, group.id, USER)\n\n@pytest.mark.parametrize('permission_to_share, permission_to_update', [('r', 'rw'), ('rw', 'r')])\ndef test_update_share_subdir_perm_for_user(repo, permission_to_share, permission_to_update):\n    v_repo_id =  api.share_subdir_to_user(repo.id, '/dir1', USER, USER2, permission_to_share)\n    assert api.check_permission(v_repo_id, USER2) == permission_to_share\n\n    api.update_share_subdir_perm_for_user(repo.id, '/dir1', USER, USER2, permission_to_update)\n    assert api.check_permission(v_repo_id, USER2) == permission_to_update\n\n    api.unshare_subdir_for_user(repo.id, '/dir1', USER, USER2) == 0\n\n@pytest.mark.parametrize('permission_to_share, permission_to_update', [('r', 'rw'), ('rw', 'r')])\ndef test_update_share_subdir_perm_for_group(repo, group, permission_to_update, permission_to_share):\n    ccnet_api.group_add_member(group.id, USER, USER2)\n    v_repo_id = api.share_subdir_to_group(repo.id, '/dir1', USER, group.id, permission_to_share)\n    assert api.check_permission(v_repo_id, USER2) == permission_to_share\n\n    api.update_share_subdir_perm_for_group(repo.id, '/dir1', USER, group.id, permission_to_update)\n    assert api.check_permission(v_repo_id, USER2) == permission_to_update\n\n    api.unshare_subdir_for_group(repo.id, '/dir1', USER, 
group.id)\n\n@pytest.mark.parametrize('permission', ['r', 'rw'])\ndef test_get_shared_users_by_repo(repo, group, permission):\n    ccnet_api.group_add_member(group.id, USER, USER2)\n    t_users = api.get_shared_users_by_repo(repo.id)\n    assert len(t_users) == 0\n\n    api.share_repo(repo.id, USER, USER2, permission)\n    api.set_group_repo(repo.id, group.id, ADMIN_USER, permission)\n    t_users = api.get_shared_users_by_repo(repo.id)\n    assert len(t_users) == 2\n\n    api.remove_share(repo.id, USER, USER2)\n    api.group_unshare_repo(repo.id, group.id, USER)\n\n@pytest.mark.parametrize('permission', ['r', 'rw'])\ndef test_subdir_permission_in_virtual_repo(repo, group, permission):\n    api.post_dir(repo.id, '/dir1', 'subdir1', USER)\n    api.post_dir(repo.id, '/dir2', 'subdir2', USER)\n\n    v_repo_id_1 = api.share_subdir_to_user(repo.id, '/dir1', USER, USER2, permission)\n    v_subdir_repo_id_1 = api.create_virtual_repo(v_repo_id_1, '/subdir1', 'subdir1', 'test_desc', USER, passwd='')\n    assert api.check_permission(v_subdir_repo_id_1, USER2) == permission\n\n    assert ccnet_api.group_add_member(group.id, USER, USER2) == 0\n    v_repo_id_2 = api.share_subdir_to_group(repo.id, '/dir2', USER, group.id, permission)\n    v_subdir_repo_id_2 = api.create_virtual_repo(v_repo_id_2, '/subdir2', 'subdir2', 'test_desc', USER, passwd='')\n    assert api.check_permission(v_subdir_repo_id_2, USER2) == permission\n\n    assert api.unshare_subdir_for_user(repo.id, '/dir1', USER, USER2) == 0\n    assert api.unshare_subdir_for_group(repo.id, '/dir2', USER, group.id) == 0\n"
  },
  {
    "path": "tests/test_share_and_perm/test_structure_repo_perm.py",
    "content": "import pytest\nfrom seaserv import seafile_api as api \nfrom seaserv import ccnet_api\n\nfrom tests.config import ADMIN_USER, USER, USER2\n\n@pytest.mark.parametrize('permission', ['r', 'rw'])\ndef test_repo_perm_in_structure (repo, permission):\n    id1 = ccnet_api.create_group('group1', USER, parent_group_id=-1)\n    id2 = ccnet_api.create_group('group2', USER, parent_group_id = id1)\n    assert id1 != -1 and id2 != -1\n\n    # USER2 in child group (id2) has permission to access repo in parent group (id1) #\n    assert ccnet_api.group_add_member(id2, USER, USER2) != -1\n    assert api.group_share_repo(repo.id, id1, USER, permission) != -1\n    assert api.check_permission(repo.id, USER2) == permission\n\n    assert api.group_unshare_repo(repo.id, id1, USER) != -1\n    assert api.check_permission(repo.id, USER2) == None\n\n    assert ccnet_api.remove_group(id2) != -1\n    assert ccnet_api.remove_group(id1) != -1\n"
  },
  {
    "path": "tests/test_trashed_repos/test_trashed_repos.py",
    "content": "import pytest\nfrom tests.config import USER\nfrom seaserv import seafile_api as api\n\ndef test_trashed_repos(repo):\n\n    #test get_trash_repo_list\n    t_start = -1\n    t_limit = -1\n    t_trash_repos_tmp = api.get_trash_repo_list(t_start, t_limit)\n    api.remove_repo(repo.id)\n    t_trash_repos = api.get_trash_repo_list(t_start, t_limit)\n    assert len(t_trash_repos) == len(t_trash_repos_tmp) + 1\n    t_trash_repos_tmp = t_trash_repos\n\n    #test get_trash_repo_owner\n    t_owner = api.get_trash_repo_owner(repo.id)\n    assert t_owner == USER\n\n    #test restore_repo_from_trash\n    t_repo_get = api.get_repo(repo.id)\n    assert t_repo_get == None\n    api.restore_repo_from_trash(repo.id)\n    t_repo_get = api.get_repo(repo.id)\n    assert t_repo_get and t_repo_get.repo_id == repo.id\n\n    #test del_repo_from_trash\n    api.del_repo_from_trash(repo.id)\n    t_trash_repos = api.get_trash_repo_list(t_start, t_limit)\n    assert len(t_trash_repos) == len(t_trash_repos_tmp) - 1\n\n    #test get_trash_repos_by_owner\n    t_trash_repos_by_owner_tmp = api.get_trash_repos_by_owner(USER)\n    api.remove_repo(repo.id)\n    t_trash_repos_by_owner = api.get_trash_repos_by_owner(USER)\n    assert len(t_trash_repos_by_owner) == len(t_trash_repos_by_owner_tmp) + 1\n\n    #test empty_repo_trash\n    api.empty_repo_trash()\n    t_trash_repos = api.get_trash_repo_list(t_start, t_limit)\n    assert len(t_trash_repos) == 0\n\n    #test empty_repo_trash_by_owner\n    t_repo_id = api.create_repo('test_trashed_repos', '', USER, passwd=None)\n    api.remove_repo(t_repo_id)\n    t_trash_repos_by_owner = api.get_trash_repos_by_owner(USER)\n    assert len(t_trash_repos_by_owner) != 0\n    api.empty_repo_trash_by_owner(USER)\n    t_trash_repos_by_owner = api.get_trash_repos_by_owner(USER)\n    assert len(t_trash_repos_by_owner) == 0\n"
  },
  {
    "path": "tests/test_upload/account.conf",
    "content": "[account]\nserver = http://192.168.60.132\nusername = 123456@qq.com\npassword = 123456\nrepoid = e63f9fc8-880a-427f-b2c4-42c00538cb94\nthread_num = 1000\n"
  },
  {
    "path": "tests/test_upload/go.mod",
    "content": "module test_upload\n\ngo 1.18\n\nrequire (\n\tgithub.com/haiwen/seafile-server/fileserver v0.0.0-20220621072834-faf434def97d // indirect\n\tgopkg.in/ini.v1 v1.66.6 // indirect\n)\n"
  },
  {
    "path": "tests/test_upload/go.sum",
    "content": "github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc=\ngithub.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=\ngithub.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=\ngithub.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=\ngithub.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=\ngithub.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=\ngithub.com/haiwen/seafile-server/fileserver v0.0.0-20220621072834-faf434def97d h1:7W5BeFzUFCx+xz5pINiuRJesr82pA2Gq0LZeHXBI0jE=\ngithub.com/haiwen/seafile-server/fileserver v0.0.0-20220621072834-faf434def97d/go.mod h1:3r5rRrKrYibzy1quQOR0/yvT+7L+iuAFAwTcggCp6wg=\ngithub.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=\ngithub.com/mattn/go-sqlite3 v1.14.0/go.mod h1:JIl7NbARA7phWnGvh0LKTyg7S9BA+6gx71ShQilpsus=\ngithub.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=\ngithub.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=\ngithub.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=\ngithub.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=\ngithub.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=\ngolang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=\ngolang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net 
v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=\ngolang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\ngolang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=\ngopkg.in/ini.v1 v1.55.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=\ngopkg.in/ini.v1 v1.66.6 h1:LATuAqN/shcYAOkv3wl2L4rkaKqkcgTBQjOyYDvcPKI=\ngopkg.in/ini.v1 v1.66.6/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=\n"
  },
  {
    "path": "tests/test_upload/readme.md",
    "content": "go run test_upload.go -c account.conf -p runtime\n"
  },
  {
    "path": "tests/test_upload/test_upload.go",
    "content": "package main\n\nimport \"fmt\"\nimport \"io\"\nimport \"sync\"\nimport \"flag\"\nimport \"log\"\nimport \"encoding/json\"\nimport \"bytes\"\nimport \"net/http\"\nimport \"mime/multipart\"\nimport \"path/filepath\"\n\nimport \"gopkg.in/ini.v1\"\nimport \"github.com/haiwen/seafile-server/fileserver/searpc\"\n\ntype Options struct {\n\tserver    string\n\tusername  string\n\tpassword  string\n\trepoID    string\n\tthreadNum int\n}\n\nvar confPath string\nvar rpcPipePath string\nvar options Options\nvar rpcclient *searpc.Client\n\nfunc init() {\n\tflag.StringVar(&confPath, \"c\", \"\", \"config file path\")\n\tflag.StringVar(&rpcPipePath, \"p\", \"\", \"rpc pipe path\")\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tpipePath := filepath.Join(rpcPipePath, \"seafile.sock\")\n\trpcclient = searpc.Init(pipePath, \"seafserv-threaded-rpcserver\")\n\n\tconfig, err := ini.Load(confPath)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to load config file: %v\", err)\n\t}\n\tsection, err := config.GetSection(\"account\")\n\tif err != nil {\n\t\tlog.Fatal(\"No account section in config file.\")\n\t}\n\n\tkey, err := section.GetKey(\"server\")\n\tif err == nil {\n\t\toptions.server = key.String()\n\t}\n\n\tkey, err = section.GetKey(\"username\")\n\tif err == nil {\n\t\toptions.username = key.String()\n\t}\n\n\tkey, err = section.GetKey(\"password\")\n\tif err == nil {\n\t\toptions.password = key.String()\n\t}\n\tkey, err = section.GetKey(\"repoid\")\n\tif err == nil {\n\t\toptions.repoID = key.String()\n\t}\n\tkey, err = section.GetKey(\"thread_num\")\n\tif err == nil {\n\t\toptions.threadNum, _ = key.Int()\n\t}\n\n\tobjID := \"{\\\"parent_dir\\\":\\\"/\\\"}\"\n\ttoken, err := rpcclient.Call(\"seafile_web_get_access_token\", options.repoID, objID, \"upload\", options.username, false)\n\tif err != nil {\n\t\tlog.Fatal(\"Failed to get web access token\\n\")\n\t}\n\taccessToken, _ := token.(string)\n\n\turl := fmt.Sprintf(\"%s:8082/upload-api/%s\", options.server, 
accessToken)\n\tcontent := []byte(\"123456\")\n\n\tvar group sync.WaitGroup\n\tfor i := 0; i < options.threadNum; i++ {\n\t\tgroup.Add(1)\n\t\tgo func(i int) {\n\t\t\tvalues := make(map[string]io.Reader)\n\t\t\tvalues[\"file\"] = bytes.NewReader(content)\n\t\t\tvalues[\"parent_dir\"] = bytes.NewBuffer([]byte(\"/\"))\n\t\t\t// values[\"relative_path\"] = bytes.NewBuffer([]byte(relativePath))\n\t\t\tvalues[\"replace\"] = bytes.NewBuffer([]byte(\"0\"))\n\t\t\tform, contentType, err := createForm(values, \"111.md\")\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(\"Failed to create multipart form: %v\", err)\n\t\t\t}\n\t\t\theaders := make(map[string][]string)\n\t\t\theaders[\"Content-Type\"] = []string{contentType}\n\t\t\t// headers[\"Authorization\"] = []string{\"Token \" + accessToken.(string)}\n\t\t\tstatus, body, err := HttpCommon(\"POST\", url, headers, form)\n\n\t\t\tlog.Printf(\"[%d]upload status: %d return body: %s error: %v\\n\", i, status, string(body), err)\n\t\t\tgroup.Done()\n\t\t}(i)\n\t}\n\tgroup.Wait()\n}\n\nfunc createForm(values map[string]io.Reader, name string) (io.Reader, string, error) {\n\tbuf := new(bytes.Buffer)\n\tw := multipart.NewWriter(buf)\n\tdefer w.Close()\n\n\tfor k, v := range values {\n\t\tvar fw io.Writer\n\t\tvar err error\n\t\tif k == \"file\" {\n\t\t\tif fw, err = w.CreateFormFile(k, name); err != nil {\n\t\t\t\treturn nil, \"\", err\n\t\t\t}\n\t\t} else {\n\t\t\tif fw, err = w.CreateFormField(k); err != nil {\n\t\t\t\treturn nil, \"\", err\n\t\t\t}\n\t\t}\n\t\tif _, err = io.Copy(fw, v); err != nil {\n\t\t\treturn nil, \"\", err\n\t\t}\n\t}\n\n\treturn buf, w.FormDataContentType(), nil\n}\n\nfunc HttpCommon(method, url string, header map[string][]string, reader io.Reader) (int, []byte, error) {\n\treq, err := http.NewRequest(method, url, reader)\n\tif err != nil {\n\t\treturn -1, nil, err\n\t}\n\treq.Header = header\n\n\trsp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn -1, nil, err\n\t}\n\tdefer 
rsp.Body.Close()\n\n\tif rsp.StatusCode == http.StatusNotFound {\n\t\treturn rsp.StatusCode, nil, fmt.Errorf(\"url %s not found\", url)\n\t}\n\tbody, err := io.ReadAll(rsp.Body)\n\tif err != nil {\n\t\treturn rsp.StatusCode, nil, err\n\t}\n\n\treturn rsp.StatusCode, body, nil\n}\n\nfunc getToken() string {\n\turl := fmt.Sprintf(\"%s:8000/api2/auth-token/\", options.server)\n\theader := make(map[string][]string)\n\theader[\"Content-Type\"] = []string{\"application/x-www-form-urlencoded\"}\n\tdata := []byte(fmt.Sprintf(\"username=%s&password=%s\", options.username, options.password))\n\t_, body, err := HttpCommon(\"POST\", url, header, bytes.NewReader(data))\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\ttokenMap := make(map[string]interface{})\n\terr = json.Unmarshal(body, &tokenMap)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\ttoken, _ := tokenMap[\"token\"].(string)\n\treturn token\n}\n"
  },
  {
    "path": "tests/test_user/test_users.py",
    "content": "import pytest\nfrom seaserv import seafile_api as api\nfrom seaserv import ccnet_api\nfrom tests.utils import randstring\nfrom tests.config import USER, USER2, ADMIN_USER\n\ndef test_user_management(repo):\n    email1 = '%s@%s.com' % (randstring(6), randstring(6))\n    email2 = '%s@%s.com' % (randstring(6), randstring(6))\n    passwd1 = 'randstring(6)'\n    passwd2 = 'randstring(6)'\n\n    ccnet_api.add_emailuser(email1, passwd1, 1, 1)\n    ccnet_api.add_emailuser(email2, passwd2, 0, 0)\n\n    ccnet_email1 = ccnet_api.get_emailuser(email1)\n    ccnet_email2 = ccnet_api.get_emailuser(email2)\n    assert ccnet_email1.is_active == True\n    assert ccnet_email1.is_staff == True\n    assert ccnet_email2.is_active == False\n    assert ccnet_email2.is_staff == False\n\n    assert ccnet_api.validate_emailuser(email1, passwd1) == 0\n    assert ccnet_api.validate_emailuser(email2, passwd2) == 0\n\n    users = ccnet_api.search_emailusers('DB',email1, -1, -1)\n    assert len(users) == 1\n    user_ccnet = users[0]\n    assert user_ccnet.email == email1\n\n    user_counts = ccnet_api.count_emailusers('DB')\n    user_numbers = ccnet_api.get_emailusers('DB', -1, -1)\n\n    ccnet_api.update_emailuser('DB', ccnet_email2.id, passwd2, 1, 1)\n    email2_new = ccnet_api.get_emailuser(email2)\n    assert email2_new.is_active == True\n    assert email2_new.is_staff == True\n\n    #test group when update user id\n    id1 = ccnet_api.create_group('group1', email1, parent_group_id=-1)\n    assert id1 != -1\n    group1 = ccnet_api.get_group(id1)\n    assert group1.parent_group_id == -1\n\n    # test shared repo when update user id\n    api.share_repo(repo.id, USER, email1, \"rw\")\n    assert api.repo_has_been_shared(repo.id)\n\n    new_email1 = '%s@%s.com' % (randstring(6), randstring(6))\n    assert ccnet_api.update_emailuser_id (email1, new_email1) == 0\n\n    shared_users = api.list_repo_shared_to(USER, repo.id)\n    assert len (shared_users) == 1\n    assert 
shared_users[0].repo_id == repo.id\n    assert shared_users[0].user == new_email1\n    assert shared_users[0].perm == \"rw\"\n\n    api.remove_share(repo.id, USER, new_email1)\n\n    email1_groups = ccnet_api.get_groups (new_email1)\n    assert len (email1_groups) == 1\n    assert email1_groups[0].id == id1\n    rm1 = ccnet_api.remove_group(id1)\n    assert rm1 == 0\n\n    ccnet_api.remove_emailuser('DB', new_email1)\n    ccnet_api.remove_emailuser('DB', email2)\n"
  },
  {
    "path": "tests/utils.py",
    "content": "import os\nimport random\nimport string\n\nfrom seaserv import ccnet_api, seafile_api\n\n\ndef create_and_get_repo(*a, **kw):\n    repo_id = seafile_api.create_repo(*a, **kw)\n    repo = seafile_api.get_repo(repo_id)\n    return repo\n\n\ndef randstring(length=12):\n    return ''.join(random.choice(string.ascii_lowercase) for i in range(length))\n\ndef create_and_get_group(*a, **kw):\n    group_id = ccnet_api.create_group(*a, **kw)\n    group = ccnet_api.get_group(group_id)\n    return group\n\ndef assert_repo_with_permission(r1, r2, permission):\n    if isinstance(r2, list):\n        assert len(r2) == 1\n        r2 = r2[0]\n    assert r2.id == r1.id\n    assert r2.permission == permission\n"
  },
  {
    "path": "tools/Makefile.am",
    "content": "\n#AM_CPPFLAGS = @GLIB2_CFLAGS@\n\nEXTRA_DIST = seafile-admin\n\nbin_SCRIPTS = seafile-admin\n"
  },
  {
    "path": "tools/seafile-admin",
    "content": "#!/usr/bin/env python\n# coding: UTF-8\n'''This is the helper script to setup/manage your seafile server\n'''\n\nimport sys\n\n####################\n### Requires Python 2.6+\n####################\nif sys.version_info.major == 3:\n    print 'Python 3 not supported yet. Quit now'\n    sys.exit(1)\nif sys.version_info.minor < 6:\n    print 'Python 2.6 or above is required. Quit now'\n    sys.exit(1)\n\nimport os\nimport time\nimport re\nimport shutil\nimport subprocess\nimport argparse\nimport uuid\n\ntry:\n    import readline\n    # Avoid pylint 'unused import' warning\n    dummy = readline\nexcept ImportError:\n    pass\n\n####################\n### Cosntants\n####################\nSERVER_MANUAL_HTTP = 'https://github.com/haiwen/seafile/wiki'\nSEAFILE_GOOGLE_GROUP = 'https://groups.google.com/forum/?fromgroups#!forum/seafile'\nSEAFILE_WEBSITE = 'http://www.seafile.com'\nSEAHUB_DOWNLOAD_URL = 'https://seafile.com.cn/downloads/seahub-latest.tar.gz'\n\n####################\n### Global variables\n####################\ncwd = os.getcwd()\nSCRIPT_NAME = os.path.basename(sys.argv[0])\n\nPYTHON = sys.executable\n\nconf = {}\nCONF_CCNET_DIR = 'ccnet_dir'\nCONF_SEAFILE_DIR = 'seafile_dir'\nCONF_SEAHUB_DIR = 'seafile_dir'\nCONF_SEAFILE_PORT = 'seafile_port'\nCONF_FILESERVER_PORT = 'fileserver_port'\nCONF_IP_OR_DOMAIN = 'ip_or_domain'\n\nCONF_SEAHUB_CONF = 'seahub_conf'\nCONF_SEAHUB_DIR = 'seahub_dir'\nCONF_SEAHUB_PORT = 'seahub_port'\n\nCONF_SEAHUB_PIDFILE = 'seahub_pidfile'\nCONF_SEAHUB_OUTLOG = 'seahub_outlog'\nCONF_SEAHUB_ERRLOG = 'seahub_errlog'\n\nCONF_CCNET_CONF_EXISTS = 'ccnet_conf_exists'\nCONF_SEAFILE_CONF_EXISTS = 'seafile_conf_exists'\n\nCONF_ADMIN_EMAIL = 'admin_email'\nCONF_ADMIN_PASSWORD = 'admin_password'\nCONF_SEAFILE_CENTRAL_CONF_DIR = 'central_config_dir'\n\n####################\n### Common helper functions\n\n\ndef highlight(content):\n    '''Add ANSI color to content to get it highlighted on terminal'''\n    return '\\x1b[33m%s\\x1b[m' % 
content\n\n\ndef info(msg):\n    print msg\n\n\ndef error(msg):\n    print 'Error: ' + msg\n    sys.exit(1)\n\n\ndef ask_question(desc,\n                 key=None,\n                 note=None,\n                 default=None,\n                 validate=None,\n                 yes_or_no=False,\n                 invalidate_msg=None):\n    '''Ask a question, return the answer. The optional validate param is a\n    function used to validate the answer. If yes_or_no is True, then a boolean\n    value would be returned.\n\n    '''\n    assert key or yes_or_no\n    desc = highlight(desc)\n    if note:\n        desc += '  (%s)' % note\n    if default:\n        desc += '\\n' + ('[default %s ]' % default)\n    else:\n        if yes_or_no:\n            desc += '\\n[yes or no]'\n        else:\n            desc += '\\n' + ('[%s ]' % key)\n\n    desc += '  '\n    while True:\n        answer = raw_input(desc)\n        if not answer:\n            if default:\n                print ''\n                return default\n            else:\n                continue\n\n        answer = answer.strip()\n\n        if yes_or_no:\n            if answer != 'yes' and answer != 'no':\n                print '\\nPlease answer yes or no\\n'\n                continue\n            else:\n                return answer == 'yes'\n        else:\n            if validate and not validate(answer):\n                if invalidate_msg:\n                    print '\\n%s\\n' % invalidate_msg\n                else:\n                    print '\\n\"%s\" is not a valid %s\\n' % (answer, key)\n                continue\n\n        print ''\n        return answer\n\n\ndef run_argv(argv,\n             cwd=None,\n             env=None,\n             suppress_stdout=False,\n             suppress_stderr=False):\n    '''Run a program and wait it to finish, and return its exit code. 
The\n    standard output of this program is supressed.\n\n    '''\n    with open(os.devnull, 'w') as devnull:\n        if suppress_stdout:\n            stdout = devnull\n        else:\n            stdout = sys.stdout\n\n        if suppress_stderr:\n            stderr = devnull\n        else:\n            stderr = sys.stderr\n\n        proc = subprocess.Popen(argv,\n                                cwd=cwd,\n                                stdout=stdout,\n                                stderr=stderr,\n                                env=env)\n        return proc.wait()\n\n\ndef run(cmdline,\n        cwd=None,\n        env=None,\n        suppress_stdout=False,\n        suppress_stderr=False):\n    '''Like run_argv but specify a command line string instead of argv'''\n    with open(os.devnull, 'w') as devnull:\n        if suppress_stdout:\n            stdout = devnull\n        else:\n            stdout = sys.stdout\n\n        if suppress_stderr:\n            stderr = devnull\n        else:\n            stderr = sys.stderr\n\n        proc = subprocess.Popen(cmdline,\n                                cwd=cwd,\n                                stdout=stdout,\n                                stderr=stderr,\n                                env=env,\n                                shell=True)\n        return proc.wait()\n\n\ndef is_running(process):\n    '''Detect if there is a process with the given name running'''\n    argv = ['pgrep', '-f', process]\n\n    return run_argv(argv, suppress_stdout=True) == 0\n\n\ndef pkill(process):\n    '''Kill the program with the given name'''\n    argv = ['pkill', '-f', process]\n\n    run_argv(argv)\n\n\ndef kill(pid):\n    '''Kill the program with the given pid'''\n    argv = ['kill', pid]\n\n    run_argv(argv)\n\n\ndef must_mkdir(path):\n    '''Create a directory, exit on failure'''\n    try:\n        os.mkdir(path)\n    except OSError, e:\n        error('failed to create directory %s:%s' % (path, e))\n\n### END of Common helper 
functions\n####################\n\n\ndef check_seafile_install():\n    '''Check if seafile has been correctly built and installed in this\n    system\n\n    '''\n    dirs = os.environ['PATH'].split(':')\n\n    def exist_in_path(prog):\n        '''Test whether prog exists in system path'''\n        for d in dirs:\n            if d == '':\n                continue\n            path = os.path.join(d, prog)\n            if os.path.exists(path):\n                return True\n\n        return False\n\n    def check_prog(name):\n        if not exist_in_path(name):\n            error(\n                '%s not found in PATH. Have you built and installed seafile server?'\n                % name)\n\n    progs = [\n        'ccnet-init',\n        'seaf-server-init',\n        'seaf-server',\n        'ccnet-server',\n        'seafile-controller',\n    ]\n\n    for prog in progs:\n        check_prog(prog)\n\n\ndef get_seahub_env():\n    '''And PYTHONPATH and CCNET_CONF_DIR/SEAFILE_CONF_DIR to env, which is\n    needed by seahub\n\n    '''\n    seahub_dir = conf[CONF_SEAHUB_DIR]\n    seahub_thirdpart_dir = os.path.join(seahub_dir, 'thirdpart')\n\n    env = dict(os.environ)\n    pypath = env.get('PYTHONPATH', '')\n\n    pathlist = [p for p in pypath.split(':') if p != '']\n    pathlist.append(seahub_thirdpart_dir)\n    newpypath = ':'.join(pathlist)\n    env['PYTHONPATH'] = newpypath\n    env['CCNET_CONF_DIR'] = conf[CONF_CCNET_DIR]\n    env['SEAFILE_CONF_DIR'] = conf[CONF_SEAFILE_DIR]\n    env['SEAFILE_CENTRAL_CONF_DIR'] = conf[CONF_SEAFILE_CENTRAL_CONF_DIR]\n    return env\n\n\n####################\n### <setup> command\n####################\ndef welcome():\n    '''Show welcome message when running the <setup> command'''\n    welcome_msg = '''\\\n-----------------------------------------------------------------\nThis script will guide you to config and setup your seafile server.\nMake sure you have read seafile server manual at\n\n        %s\n\nPress [ENTER] to 
continue\n-----------------------------------------------------------------\n''' % SERVER_MANUAL_HTTP\n    print welcome_msg\n    raw_input()\n\ndef get_server_ip_or_domain():\n    def validate(s):\n        r = r'^[^.].+\\..+[^.]$'\n        return bool(re.match(r, s))\n\n    question = 'What is the ip of the server?'\n    key = 'ip or domain'\n    note = 'For example: www.mycompany.com, 192.168.1.101'\n    conf[CONF_IP_OR_DOMAIN] = ask_question(question,\n                                           key=key,\n                                           note=note,\n                                           validate=validate)\n\n\ndef get_ccnet_conf_dir():\n    ccnet_conf_dir = os.path.join(cwd, 'ccnet')\n\n    if os.path.exists(ccnet_conf_dir):\n        question = 'It seems there already exists ccnet config files in %s, Do you want to use them?' % ccnet_conf_dir\n        yesno = ask_question(question, yes_or_no=True)\n        if not yesno:\n            print highlight(\n                '\\nRemove the directory %s first, and run the script again.\\n'\n                % ccnet_conf_dir)\n            sys.exit(1)\n        else:\n            conf[CONF_CCNET_CONF_EXISTS] = True\n    else:\n        conf[CONF_CCNET_CONF_EXISTS] = False\n\n    conf[CONF_CCNET_DIR] = ccnet_conf_dir\n\n\ndef get_seafile_port():\n    def validate(s):\n        try:\n            port = int(s)\n        except ValueError:\n            return False\n\n        return port > 0 and port < 65536\n\n    question = 'Which port do you want to use for the seafile server?'\n    key = 'seafile server port'\n    default = '12001'\n    conf[CONF_SEAFILE_PORT] = ask_question(question,\n                                           key=key,\n                                           default=default,\n                                           validate=validate)\n\n\ndef get_fileserver_port():\n    def validate(s):\n        try:\n            port = int(s)\n        except ValueError:\n            return False\n\n        
return port > 0 and port < 65536\n\n    question = 'Which port do you want to use for the seafile fileserver?'\n    key = 'seafile fileserver port'\n    default = '8082'\n    conf[CONF_FILESERVER_PORT] = ask_question(question,\n                                              key=key,\n                                              default=default,\n                                              validate=validate)\n\n\ndef get_seafile_data_dir():\n    question = 'Where do you want to put your seafile data?'\n    key = 'seafile-data'\n    note = 'Please use a volume with enough free space'\n    default = os.path.join(cwd, 'seafile-data')\n    seafile_data_dir = ask_question(question,\n                                    key=key,\n                                    note=note,\n                                    default=default)\n\n    if os.path.exists(seafile_data_dir):\n        question = 'It seems there already exists seafile data in %s, Do you want to use them?' % seafile_data_dir\n        yesno = ask_question(question, yes_or_no=True)\n        if not yesno:\n            print highlight(\n                '\\nRemove the directory %s first, and run the script again.\\n'\n                % seafile_data_dir)\n            sys.exit(1)\n        else:\n            conf[CONF_SEAFILE_CONF_EXISTS] = True\n    else:\n        conf[CONF_SEAFILE_CONF_EXISTS] = False\n\n    conf[CONF_SEAFILE_DIR] = seafile_data_dir\n\n\ndef create_gunicorn_conf():\n    runtime_dir = os.path.join(cwd, 'seafile-server', 'runtime')\n    confpath = os.path.join(runtime_dir, 'seahub.conf')\n\n    if os.path.exists(confpath):\n        return\n\n    if not os.path.exists(runtime_dir):\n        must_mkdir(runtime_dir)\n\n    content = '''\\\nimport os\ndaemon = True\nworkers = 3\n\n# Logging\nruntime_dir = os.path.dirname(__file__)\npidfile = os.path.join(runtime_dir, 'seahub.pid')\nerrorlog = os.path.join(runtime_dir, 'error.log')\naccesslog = os.path.join(runtime_dir, 'access.log')\n'''\n\n    try:\n     
   with open(confpath, 'w') as fp:\n            fp.write(content)\n    except:\n        error('Failed to write seahub config')\n\n\ndef gen_seahub_secret_key():\n    data = str(uuid.uuid4()) + str(uuid.uuid4())\n    return data[:40]\n\n\ndef create_seahub_settings_py():\n    seahub_settings_py = os.path.join(cwd, 'conf', 'seahub_settings.py')\n    try:\n        with open(seahub_settings_py, 'w') as fp:\n            line = \"SECRET_KEY = '%s'\" % gen_seahub_secret_key()\n            fp.write(line)\n    except Exception, e:\n        error('failed to create %s: %s' % (seahub_settings_py, e))\n\n\ndef move_avatar():\n    seahub_data_dir = os.path.join(cwd, 'seahub-data')\n    outside_avatar_dir = os.path.join(seahub_data_dir, 'avatars')\n    seahub_avatar_dir = os.path.join(conf[CONF_SEAHUB_DIR], 'media', 'avatars')\n\n    if os.path.exists(outside_avatar_dir):\n        return\n\n    if not os.path.exists(seahub_data_dir):\n        must_mkdir(seahub_data_dir)\n\n    # move the avatars dir outside\n    shutil.move(seahub_avatar_dir, outside_avatar_dir)\n    # make the the original avatars dir a symlink pointing to the outside dir\n    os.symlink(outside_avatar_dir, seahub_avatar_dir)\n\n\ndef init_seahub():\n    seahub_dir = conf[CONF_SEAHUB_DIR]\n\n    # create seahub_settings.py\n    create_seahub_settings_py()\n\n    argv = [PYTHON, 'manage.py', 'syncdb']\n    # Set proper PYTHONPATH before run django syncdb command\n    env = get_seahub_env()\n\n    print\n    print\n    info('Now initializing seahub database, please wait...')\n    print\n\n    if run_argv(argv, cwd=seahub_dir, env=env) != 0:\n        error('Seahub syncdb failed')\n\n    info('done')\n\n    move_avatar()\n    create_gunicorn_conf()\n\n\ndef check_django_version():\n    '''Requires django 1.8'''\n    import django\n    if django.VERSION[0] != 1 or django.VERSION[1] != 8:\n        error('Django 1.8 is required')\n    del django\n\n\ndef check_python_module(import_name, package_name=None, 
silent=False):\n    package_name = package_name or import_name\n    if not silent:\n        info('checking %s' % package_name)\n    try:\n        __import__(import_name)\n    except ImportError:\n        error('Python module \"%s\" not found. Please install it first' %\n              package_name)\n\n\ndef check_python_dependencies(silent=False):\n    '''Ensure all python libraries we need are installed'''\n\n    if not silent:\n        info('check python modules ...')\n    check_django_version()\n    def check(*a, **kw):\n        kw.setdefault('silent', silent)\n        check_python_module(*a, **kw)\n    pkgs = [\n        'sqlite3',\n        'chardet',\n        'six',\n        'pytz',\n        'rest_framework',\n        'compressor',\n        'statici18n',\n        'jsonfield',\n        'dateutil',\n        'constance',\n        'openpyxl',\n    ] # yapf: disable\n    for pkg in pkgs:\n        check(pkg)\n    check('PIL', 'python imaging library(PIL)')\n\n    print\n\n\ndef config_ccnet_seafile():\n    get_ccnet_conf_dir()\n    if not conf[CONF_CCNET_CONF_EXISTS]:\n        get_server_ip_or_domain()\n\n    get_seafile_data_dir()\n    if not conf[CONF_SEAFILE_CONF_EXISTS]:\n        get_seafile_port()\n        get_fileserver_port()\n\n    info('This is your configuration')\n    info('------------------------------------------')\n    if conf[CONF_CCNET_CONF_EXISTS]:\n        info('ccnet config:        use existing config in %s' %\n             highlight(conf[CONF_CCNET_DIR]))\n    else:\n        info('ccnet conf dir:           %s' % highlight(conf[CONF_CCNET_DIR]))\n        info('server host:              %s' %\n             highlight(conf[CONF_IP_OR_DOMAIN]))\n\n    if conf[CONF_SEAFILE_CONF_EXISTS]:\n        info('seafile:             use existing config in %s' %\n             highlight(conf[CONF_SEAFILE_DIR]))\n    else:\n        info('seafile data dir:         %s' %\n             highlight(conf[CONF_SEAFILE_DIR]))\n        info('seafile port:             %s' %\n   
          highlight(conf[CONF_SEAFILE_PORT]))\n        info('seafile fileserver port:  %s' %\n             highlight(conf[CONF_FILESERVER_PORT]))\n\n    info('------------------------------------------')\n    info('Press ENTER if the config is right, or anything else to re-config ')\n\n    if raw_input() != '':\n        config_ccnet_seafile()\n    else:\n        return\n\n\ndef init_ccnet_seafile():\n    if not conf[CONF_CCNET_CONF_EXISTS]:\n        info('Generating ccnet configuration...')\n        argv = [\n            'ccnet-init',\n            '-F',\n            conf[CONF_SEAFILE_CENTRAL_CONF_DIR],\n            '-c',\n            conf[CONF_CCNET_DIR],\n            '--host',\n            conf[CONF_IP_OR_DOMAIN],\n        ]\n\n        if run_argv(argv) != 0:\n            error('failed to init ccnet configuration')\n\n        info('done')\n\n    if not conf[CONF_SEAFILE_CONF_EXISTS]:\n        info('Generating seafile configuration...')\n        argv = [\n            'seaf-server-init',\n            '-F',\n            conf[CONF_SEAFILE_CENTRAL_CONF_DIR],\n            '--seafile-dir',\n            conf[CONF_SEAFILE_DIR],\n            '--port',\n            conf[CONF_SEAFILE_PORT],\n            '--fileserver-port',\n            conf[CONF_FILESERVER_PORT],\n        ]\n\n        if run_argv(argv) != 0:\n            error('failed to init seafile configuration')\n\n        info('done')\n\n    seafile_ini = os.path.join(conf[CONF_CCNET_DIR], 'seafile.ini')\n    with open(seafile_ini, 'w') as fp:\n        fp.write(conf[CONF_SEAFILE_DIR])\n\n\n####################\n### <start> command\n####################\ndef start_controller():\n    argv = [\n        'seafile-controller',\n        '-c',\n        conf[CONF_CCNET_DIR],\n        '-d',\n        conf[CONF_SEAFILE_DIR],\n        '-F',\n        conf[CONF_SEAFILE_CENTRAL_CONF_DIR],\n    ]\n\n    info('Starting seafile-server...')\n    if run_argv(argv) != 0:\n        error('Failed to start seafile')\n\n    # check again after 
several seconds\n    time.sleep(10)\n\n    if not is_running('seafile-controller'):\n        error('Failed to start seafile')\n\n\ndef start_seahub_gunicorn():\n    argv = [\n        'gunicorn',\n        'seahub.wsgi:application',\n        '-c',\n        conf[CONF_SEAHUB_CONF],\n        '-b',\n        '0.0.0.0:%s' % conf[CONF_SEAHUB_PORT],\n    ]\n\n    info('Starting seahub...')\n    env = get_seahub_env()\n    if run_argv(argv, cwd=conf[CONF_SEAHUB_DIR], env=env) != 0:\n        error('Failed to start seahub')\n\n    info('Seahub running on port %s' % conf[CONF_SEAHUB_PORT])\n\n\ndef start_seahub_fastcgi():\n    info('Starting seahub in fastcgi mode...')\n    argv = [\n        PYTHON,\n        'manage.py',\n        'runfcgi',\n        'host=%(host)s',\n        'port=%(port)s',\n        'pidfile=%(pidfile)s',\n        'outlog=%(outlog)s',\n        'errlog=%(errlog)s',\n    ]\n\n    host = os.environ.get('SEAFILE_FASTCGI_HOST', '127.0.0.1')\n    cmdline = ' '.join(argv) % \\\n              dict(host=host,\n                   port=conf[CONF_SEAHUB_PORT],\n                   pidfile=conf[CONF_SEAHUB_PIDFILE],\n                   outlog=conf[CONF_SEAHUB_OUTLOG],\n                   errlog=conf[CONF_SEAHUB_ERRLOG])\n\n    env = get_seahub_env()\n\n    if run(cmdline, cwd=conf[CONF_SEAHUB_DIR], env=env) != 0:\n        error('Failed to start seahub in fastcgi mode')\n\n    info('Seahub running on port %s (fastcgi)' % conf[CONF_SEAHUB_PORT])\n\n\ndef read_seafile_data_dir(ccnet_conf_dir):\n    '''Read the location of seafile-data from seafile.ini, also consider the\n    upgrade from older version which do not has the seafile.ini feature\n\n    '''\n    seafile_ini = os.path.join(ccnet_conf_dir, 'seafile.ini')\n    if os.path.exists(seafile_ini):\n        with open(seafile_ini, 'r') as fp:\n            seafile_data_dir = fp.read().strip()\n    else:\n        # In previous seafile-admin, seafiled-data folder must be under\n        # the top level directory, so we do not 
store the location of\n        # seafile-data folder in seafile.ini\n        seafile_data_dir = os.path.join(cwd, 'seafile-data')\n        if os.path.exists(seafile_data_dir):\n            with open(seafile_ini, 'w') as fp:\n                fp.write(seafile_data_dir)\n\n    return seafile_data_dir\n\n\ndef check_layout(args):\n    def error_not_found(path):\n        error('%s not found' % path)\n\n    ccnet_conf_dir = os.path.join(cwd, 'ccnet')\n    if not os.path.exists(ccnet_conf_dir):\n        error_not_found(ccnet_conf_dir)\n\n    central_config_dir = os.path.join(cwd, 'conf')\n\n    ccnet_conf = os.path.join(central_config_dir, 'ccnet.conf')\n    if not os.path.exists(ccnet_conf):\n        error_not_found(ccnet_conf)\n\n    seafile_data_dir = read_seafile_data_dir(ccnet_conf_dir)\n    if not os.path.exists(seafile_data_dir):\n        error_not_found(seafile_data_dir)\n\n    seafile_conf = os.path.join(central_config_dir, 'seafile.conf')\n    if not os.path.exists(seafile_conf):\n        error_not_found(seafile_conf)\n\n    runtime_dir = os.path.join(cwd, 'seafile-server', 'runtime')\n    seahub_conf = os.path.join(runtime_dir, 'seahub.conf')\n    if not os.path.exists(seahub_conf):\n        error_not_found(seahub_conf)\n\n    seahub_dir = os.path.join(cwd, 'seafile-server', 'seahub')\n    if not os.path.exists(seahub_conf):\n        error_not_found(seahub_dir)\n\n    conf[CONF_SEAFILE_CENTRAL_CONF_DIR] = central_config_dir\n    conf[CONF_CCNET_DIR] = ccnet_conf_dir\n    conf[CONF_SEAFILE_DIR] = seafile_data_dir\n    conf[CONF_SEAHUB_DIR] = seahub_dir\n    conf[CONF_SEAHUB_CONF] = seahub_conf\n    conf[CONF_SEAHUB_PIDFILE] = os.path.join(runtime_dir, 'seahub.pid')\n    conf[CONF_SEAHUB_OUTLOG] = os.path.join(runtime_dir, 'access.log')\n    conf[CONF_SEAHUB_ERRLOG] = os.path.join(runtime_dir, 'error.log')\n\n\ndef check_config(args):\n    check_layout(args)\n\n    try:\n        port = int(args.port)\n    except ValueError:\n        error('invalid port: %s' % 
args.port)\n    else:\n        if port <= 0 or port > 65535:\n            error('invalid port: %s' % args.port)\n\n    conf[CONF_SEAHUB_PORT] = port\n\n\ndef check_directory_layout():\n    seaf_server_dir = os.path.join(cwd, 'seafile-server')\n    if not os.path.exists(seaf_server_dir):\n        error(\n            '\"seafile-server/\" not found in current directory. \\nPlease run seafile-admin in the correct directory.')\n\n    seahub_dir = os.path.join(seaf_server_dir, 'seahub')\n    if not os.path.exists(seahub_dir):\n        error(\n            '\"seafile-server/seahub/\" not found. \\nPlease download seahub first.')\n\n    conf[CONF_SEAHUB_DIR] = seahub_dir\n\n\ndef setup_seafile(args):\n    # avoid pylint \"unused variable\" warning\n    dummy = args\n\n    welcome()\n    check_python_dependencies()\n    conf[CONF_SEAFILE_CENTRAL_CONF_DIR] = os.path.join(cwd, 'conf')\n    config_ccnet_seafile()\n    init_ccnet_seafile()\n    init_seahub()\n\n    print\n    print '-----------------------------------------------------------------'\n    print '-----------------------------------------------------------------'\n    print 'Your seafile server configuration has been finished successfully.'\n    print '-----------------------------------------------------------------'\n    print '-----------------------------------------------------------------'\n    print\n    print 'To start/stop seafile server:'\n    print\n    print highlight('         $ cd %s' % cwd)\n    print highlight('         $ %s { start | stop }' % SCRIPT_NAME)\n    print\n    print 'If you have any problem, refer to\\n'\n    print\n    print ' Seafile server manual:      %s' % SERVER_MANUAL_HTTP\n    print\n    print ' Seafile discussion group:   %s' % SEAFILE_GOOGLE_GROUP\n    print\n    print ' Seafile website:            %s' % SEAFILE_WEBSITE\n    print\n    print 'for more information.'\n    print\n\n\ndef check_necessary_files():\n    files = [\n        os.path.join(cwd, 'conf', 'ccnet.conf'),\n    
    os.path.join(cwd, 'seafile-server', 'runtime', 'seahub.conf'),\n        os.path.join(cwd, 'seahub.db'),\n        os.path.join(cwd, 'conf', 'seahub_settings.py'),\n    ]\n\n    for fpath in files:\n        if not os.path.exists(fpath):\n            error('%s not found' % fpath)\n\n\ndef start_seafile(args):\n    '''start ccnet/seafile/seahub/fileserver'''\n    if is_running('seafile-controller'):\n        error(highlight('NOTE: Seafile is already running'))\n\n    check_python_dependencies(silent=True)\n    if args.fastcgi:\n        check_python_module('flup', 'flup', silent=True)\n    else:\n        check_python_module('gunicorn', 'gunicorn', silent=True)\n\n    check_necessary_files()\n\n    check_config(args)\n\n    start_controller()\n\n    if args.port:\n        try:\n            port = int(args.port)\n        except ValueError:\n            error('invalid port: %s' % args.port)\n        else:\n            if port <= 0 or port > 65535:\n                error('invalid port: %s' % args.port)\n\n    if args.fastcgi:\n        start_seahub_fastcgi()\n    else:\n        start_seahub_gunicorn()\n\n    info('Done')\n\n\ndef stop_seafile(dummy):\n    info('Stopping seafile server')\n    pkill('seafile-controller')\n    runtime_dir = os.path.join(cwd, 'seafile-server', 'runtime')\n    pidfile = os.path.join(runtime_dir, 'seahub.pid')\n    try:\n        with open(pidfile, 'r') as fp:\n            pid = fp.read().strip('\\n ')\n            if pid:\n                kill(pid)\n    except:\n        pass\n\n    info('done')\n\n\ndef reset_admin(args):\n    '''reset seafile admin account'''\n    check_layout(args)\n    env = get_seahub_env()\n\n    argv = [PYTHON, 'manage.py', 'createsuperuser']\n\n    env = get_seahub_env()\n    seahub_dir = conf[CONF_SEAHUB_DIR]\n    run_argv(argv, cwd=seahub_dir, env=env)\n\n\ndef main():\n    check_seafile_install()\n    check_directory_layout()\n\n    parser = argparse.ArgumentParser()\n    subparsers = 
parser.add_subparsers(title='subcommands', description='')\n\n    parser_setup = subparsers.add_parser('setup',\n                                         help='setup the seafile server')\n    parser_setup.set_defaults(func=setup_seafile)\n\n    parser_start = subparsers.add_parser('start',\n                                         help='start the seafile server')\n    parser_start.set_defaults(func=start_seafile)\n\n    parser_start.add_argument('--fastcgi',\n                              help='start seahub in fastcgi mode',\n                              action='store_true')\n\n    parser_start.add_argument('--port',\n                              help='start seahub in fastcgi mode',\n                              default='8000')\n\n    parser_stop = subparsers.add_parser('stop', help='stop the seafile server')\n    parser_stop.set_defaults(func=stop_seafile)\n\n    parser_reset_admin = subparsers.add_parser(\n        'reset-admin',\n        help='reset seafile admin account')\n    parser_reset_admin.set_defaults(func=reset_admin)\n\n    parser_create_admin = subparsers.add_parser(\n        'create-admin',\n        help='create seafile admin account')\n    parser_create_admin.set_defaults(func=reset_admin)\n\n    args = parser.parse_args()\n    args.func(args)\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "updateversion.sh",
    "content": "#! /bin/sh\n\nif [ $# != \"2\" ]; then\n    echo \"$0 <old_version> <new_version>\"\n    exit\nfi\n\nold_ver=$1\nnew_ver=$2\n\nif test \"$(uname)\" = \"Darwin\"; then\n    sed -i '' -e \"s|$old_ver|$new_ver|\" web/setup_mac.py\n    sed -i '' -e \"s|VERSION=$old_ver|VERSION=$new_ver|\" setupmac.sh\n    sed -i '' -e \"s|<string>$old_ver</string>|<string>$new_ver</string>|\" gui/mac/seafile/seafile/*.plist\nelse\n    sed -i  \"s|$old_ver|$new_ver|\" web/setup_mac.py\n    sed -i \"s|VERSION=$old_ver|VERSION=$new_ver|\" setupmac.sh\n    sed -i \"s|<string>$old_ver</string>|<string>$new_ver</string>|\" gui/mac/seafile/seafile/*.plist\n\nfi\n\n"
  }
]